aarch64: fix context and hybrid hardware breakpoints
[fw/openocd] / src / target / aarch64.c
1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36         struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38         struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40         struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42         struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45         target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49         uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53         int retval = ERROR_OK;
54
55         struct aarch64_common *aarch64 = target_to_aarch64(target);
56         struct armv8_common *armv8 = target_to_armv8(target);
57
58         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
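                /* 0xd5181000 = MSR SCTLR_EL1, X0: write the saved system control register back */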
60                 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61                                                      0xd5181000,
62                                                      aarch64->system_control_reg);
63         }
64
65         return retval;
66 }
67
68 /*  check the address before an aarch64_apb read/write access with the MMU on,
69  *  to avoid a predictable APB data abort */
70 static int aarch64_check_address(struct target *target, uint32_t address)
71 {
72         /* TODO */
73         return ERROR_OK;
74 }
75 /*  modify system_control_reg to enable or disable the MMU for:
76  *  - virt2phys address conversion
77  *  - reading or writing memory at a physical or virtual address */
78 static int aarch64_mmu_modify(struct target *target, int enable)
79 {
80         struct aarch64_common *aarch64 = target_to_aarch64(target);
81         struct armv8_common *armv8 = &aarch64->armv8_common;
82         int retval = ERROR_OK;
83
84         if (enable) {
85                 /*  the MMU must have been enabled when the target stopped */
86                 if (!(aarch64->system_control_reg & 0x1U)) {
87                         LOG_ERROR("trying to enable the MMU on a target that stopped with the MMU disabled");
88                         return ERROR_FAIL;
89                 }
90                 if (!(aarch64->system_control_reg_curr & 0x1U)) {
91                         aarch64->system_control_reg_curr |= 0x1U;
92                         retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
93                                                              0xd5181000,
94                                                              aarch64->system_control_reg_curr);
95                 }
96         } else {
97                 if (aarch64->system_control_reg_curr & 0x4U) {
98                         /*  data cache is active */
99                         aarch64->system_control_reg_curr &= ~0x4U;
100                         /* flush the whole data cache before disabling it */
101                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
102                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
103                 }
104                 if ((aarch64->system_control_reg_curr & 0x1U)) {
105                         aarch64->system_control_reg_curr &= ~0x1U;
106                         retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
107                                                              0xd5181000,
108                                                              aarch64->system_control_reg_curr);
109                 }
110         }
111         return retval;
112 }
113
114 /*
115  * Basic debug access, very low level assumes state is saved
116  */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119         struct armv8_common *armv8 = target_to_armv8(target);
120         int retval;
121         uint32_t dummy;
122
123         LOG_DEBUG(" ");
124
125         /* Unlocking the debug registers for modification
126          * The debug port might be uninitialised, so try twice */
127         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128                              armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
129         if (retval != ERROR_OK) {
130                 /* try again */
131                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132                              armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
133                 if (retval == ERROR_OK)
134                         LOG_USER("Unlocking debug access failed on first try, but succeeded on second try.");
135         }
136         if (retval != ERROR_OK)
137                 return retval;
138         /* Clear Sticky Power Down status Bit in PRSR to enable access to
139            the registers in the Core Power Domain */
140         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
142         if (retval != ERROR_OK)
143                 return retval;
144
145         /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147         /* Resync breakpoint registers */
148
149         /* Since this is likely called from init or reset, update target state information*/
150         return aarch64_poll(target);
151 }
152
153 /* To reduce needless round-trips, pass in a pointer to the current
154  * DSCR value.  Initialize it to zero if you just need to know the
155  * value on return from this function; or DSCR_ITE if you
156  * happen to know that no instruction is pending.
157  */
158 static int aarch64_exec_opcode(struct target *target,
159         uint32_t opcode, uint32_t *dscr_p)
160 {
161         uint32_t dscr;
162         int retval;
163         struct armv8_common *armv8 = target_to_armv8(target);
164         dscr = dscr_p ? *dscr_p : 0;
165
166         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
167
168         /* Wait for InstrCompl bit to be set */
169         long long then = timeval_ms();
170         while ((dscr & DSCR_ITE) == 0) {
171                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
173                 if (retval != ERROR_OK) {
174                         LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
175                         return retval;
176                 }
177                 if (timeval_ms() > then + 1000) {
178                         LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
179                         return ERROR_FAIL;
180                 }
181         }
182
183         retval = mem_ap_write_u32(armv8->debug_ap,
184                         armv8->debug_base + CPUV8_DBG_ITR, opcode);
185         if (retval != ERROR_OK)
186                 return retval;
187
188         then = timeval_ms();
189         do {
190                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
191                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
192                 if (retval != ERROR_OK) {
193                         LOG_ERROR("Could not read DSCR register");
194                         return retval;
195                 }
196                 if (timeval_ms() > then + 1000) {
197                         LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
198                         return ERROR_FAIL;
199                 }
200         } while ((dscr & DSCR_ITE) == 0);       /* Wait for InstrCompl bit to be set */
201
202         if (dscr_p)
203                 *dscr_p = dscr;
204
205         return retval;
206 }
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210         uint32_t address,
211         uint32_t value)
212 {
213         int retval;
214         struct armv8_common *armv8 = target_to_armv8(target);
215
216         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218         return retval;
219 }
220
221 /*
222  * AARCH64 implementation of Debug Programmer's Model
223  *
224  * NOTE the invariant:  these routines return with DSCR_ITE set,
225  * so there's no need to poll for it before executing an instruction.
226  *
227  * NOTE that in several of these cases the "stall" mode might be useful.
228  * It'd let us queue a few operations together... prepare/finish might
229  * be the places to enable/disable that mode.
230  */
231
232 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
233 {
234         return container_of(dpm, struct aarch64_common, armv8_common.dpm);
235 }
236
237 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
238 {
239         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240         return mem_ap_write_u32(armv8->debug_ap,
241                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
245 {
246         int ret;
247         LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
248         LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
249         ret = mem_ap_write_u32(armv8->debug_ap,
250                                armv8->debug_base + CPUV8_DBG_DTRRX, data);
251         ret += mem_ap_write_u32(armv8->debug_ap,
252                                 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
253         return ret;
254 }
255
256 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
257         uint32_t *dscr_p)
258 {
259         uint32_t dscr = DSCR_ITE;
260         int retval;
261
262         if (dscr_p)
263                 dscr = *dscr_p;
264
265         /* Wait for DTRTX to be full (data ready from the target) */
266         long long then = timeval_ms();
267         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
268                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
269                                 armv8->debug_base + CPUV8_DBG_DSCR,
270                                 &dscr);
271                 if (retval != ERROR_OK)
272                         return retval;
273                 if (timeval_ms() > then + 1000) {
274                         LOG_ERROR("Timeout waiting for read dcc");
275                         return ERROR_FAIL;
276                 }
277         }
278
279         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280                                             armv8->debug_base + CPUV8_DBG_DTRTX,
281                                             data);
282         if (retval != ERROR_OK)
283                 return retval;
284         LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
285
286         if (dscr_p)
287                 *dscr_p = dscr;
288
289         return retval;
290 }
291
292 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
293         uint32_t *dscr_p)
294 {
295         uint32_t dscr = DSCR_ITE;
296         uint32_t higher;
297         int retval;
298
299         if (dscr_p)
300                 dscr = *dscr_p;
301
302         /* Wait for DTRTX to be full (data ready from the target) */
303         long long then = timeval_ms();
304         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
305                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
306                                 armv8->debug_base + CPUV8_DBG_DSCR,
307                                 &dscr);
308                 if (retval != ERROR_OK)
309                         return retval;
310                 if (timeval_ms() > then + 1000) {
311                         LOG_ERROR("Timeout waiting for read dcc");
312                         return ERROR_FAIL;
313                 }
314         }
315
316         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
317                                             armv8->debug_base + CPUV8_DBG_DTRTX,
318                                             (uint32_t *)data);
319         if (retval != ERROR_OK)
320                 return retval;
321
322         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
323                                             armv8->debug_base + CPUV8_DBG_DTRRX,
324                                             &higher);
325         if (retval != ERROR_OK)
326                 return retval;
327
328         *data = *(uint32_t *)data | (uint64_t)higher << 32;
329         LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
330
331         if (dscr_p)
332                 *dscr_p = dscr;
333
334         return retval;
335 }
336
337 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
338 {
339         struct aarch64_common *a8 = dpm_to_a8(dpm);
340         uint32_t dscr;
341         int retval;
342
343         /* set up invariant:  INSTR_COMP is set after every DPM operation */
344         long long then = timeval_ms();
345         for (;; ) {
346                 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
347                                 a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
348                                 &dscr);
349                 if (retval != ERROR_OK)
350                         return retval;
351                 if ((dscr & DSCR_ITE) != 0)
352                         break;
353                 if (timeval_ms() > then + 1000) {
354                         LOG_ERROR("Timeout waiting for dpm prepare");
355                         return ERROR_FAIL;
356                 }
357         }
358
359         /* this "should never happen" ... */
360         if (dscr & DSCR_DTR_RX_FULL) {
361                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
362                 /* Clear DCCRX */
363                 retval = aarch64_exec_opcode(
364                                 a8->armv8_common.arm.target,
365                                 0xd5130400,
366                                 &dscr);
367                 if (retval != ERROR_OK)
368                         return retval;
369         }
370
371         return retval;
372 }
373
374 static int aarch64_dpm_finish(struct arm_dpm *dpm)
375 {
376         /* REVISIT what could be done here? */
377         return ERROR_OK;
378 }
379
380 static int aarch64_instr_execute(struct arm_dpm *dpm,
381         uint32_t opcode)
382 {
383         struct aarch64_common *a8 = dpm_to_a8(dpm);
384         uint32_t dscr = DSCR_ITE;
385
386         return aarch64_exec_opcode(
387                         a8->armv8_common.arm.target,
388                         opcode,
389                         &dscr);
390 }
391
392 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
393         uint32_t opcode, uint32_t data)
394 {
395         struct aarch64_common *a8 = dpm_to_a8(dpm);
396         int retval;
397         uint32_t dscr = DSCR_ITE;
398
399         retval = aarch64_write_dcc(&a8->armv8_common, data);
400         if (retval != ERROR_OK)
401                 return retval;
402
403         return aarch64_exec_opcode(
404                         a8->armv8_common.arm.target,
405                         opcode,
406                         &dscr);
407 }
408
409 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
410         uint32_t opcode, uint64_t data)
411 {
412         struct aarch64_common *a8 = dpm_to_a8(dpm);
413         int retval;
414         uint32_t dscr = DSCR_ITE;
415
416         retval = aarch64_write_dcc_64(&a8->armv8_common, data);
417         if (retval != ERROR_OK)
418                 return retval;
419
420         return aarch64_exec_opcode(
421                         a8->armv8_common.arm.target,
422                         opcode,
423                         &dscr);
424 }
425
426 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
427         uint32_t opcode, uint32_t data)
428 {
429         struct aarch64_common *a8 = dpm_to_a8(dpm);
430         uint32_t dscr = DSCR_ITE;
431         int retval;
432
433         retval = aarch64_write_dcc(&a8->armv8_common, data);
434         if (retval != ERROR_OK)
435                 return retval;
436
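        /* 0xd5330500 = MRS X0, DBGDTRRX_EL0: move the DCC word into X0 */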
437         retval = aarch64_exec_opcode(
438                         a8->armv8_common.arm.target,
439                         0xd5330500,
440                         &dscr);
441         if (retval != ERROR_OK)
442                 return retval;
443
444         /* then the opcode, taking data from R0 */
445         retval = aarch64_exec_opcode(
446                         a8->armv8_common.arm.target,
447                         opcode,
448                         &dscr);
449
450         return retval;
451 }
452
453 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
454         uint32_t opcode, uint64_t data)
455 {
456         struct aarch64_common *a8 = dpm_to_a8(dpm);
457         uint32_t dscr = DSCR_ITE;
458         int retval;
459
460         retval = aarch64_write_dcc_64(&a8->armv8_common, data);
461         if (retval != ERROR_OK)
462                 return retval;
463
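        /* 0xd5330400 = MRS X0, DBGDTR_EL0: move the 64-bit DCC value into X0 */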
464         retval = aarch64_exec_opcode(
465                         a8->armv8_common.arm.target,
466                         0xd5330400,
467                         &dscr);
468         if (retval != ERROR_OK)
469                 return retval;
470
471         /* then the opcode, taking data from R0 */
472         retval = aarch64_exec_opcode(
473                         a8->armv8_common.arm.target,
474                         opcode,
475                         &dscr);
476
477         return retval;
478 }
479
480 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
481 {
482         struct target *target = dpm->arm->target;
483         uint32_t dscr = DSCR_ITE;
484
485         /* "Prefetch flush" after modifying execution status in CPSR */
486         return aarch64_exec_opcode(target,
487                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
488                         &dscr);
489 }
490
491 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
492         uint32_t opcode, uint32_t *data)
493 {
494         struct aarch64_common *a8 = dpm_to_a8(dpm);
495         int retval;
496         uint32_t dscr = DSCR_ITE;
497
498         /* the opcode, writing data to DCC */
499         retval = aarch64_exec_opcode(
500                         a8->armv8_common.arm.target,
501                         opcode,
502                         &dscr);
503         if (retval != ERROR_OK)
504                 return retval;
505
506         return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
507 }
508
509 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
510         uint32_t opcode, uint64_t *data)
511 {
512         struct aarch64_common *a8 = dpm_to_a8(dpm);
513         int retval;
514         uint32_t dscr = DSCR_ITE;
515
516         /* the opcode, writing data to DCC */
517         retval = aarch64_exec_opcode(
518                         a8->armv8_common.arm.target,
519                         opcode,
520                         &dscr);
521         if (retval != ERROR_OK)
522                 return retval;
523
524         return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
525 }
526
527 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
528         uint32_t opcode, uint32_t *data)
529 {
530         struct aarch64_common *a8 = dpm_to_a8(dpm);
531         uint32_t dscr = DSCR_ITE;
532         int retval;
533
534         /* the opcode, writing data to R0 */
535         retval = aarch64_exec_opcode(
536                         a8->armv8_common.arm.target,
537                         opcode,
538                         &dscr);
539         if (retval != ERROR_OK)
540                 return retval;
541
542         /* write R0 to DCC */
543         retval = aarch64_exec_opcode(
544                         a8->armv8_common.arm.target,
545                         0xd5130400,  /* msr dbgdtr_el0, x0 */
546                         &dscr);
547         if (retval != ERROR_OK)
548                 return retval;
549
550         return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
551 }
552
553 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
554         uint32_t opcode, uint64_t *data)
555 {
556         struct aarch64_common *a8 = dpm_to_a8(dpm);
557         uint32_t dscr = DSCR_ITE;
558         int retval;
559
560         /* the opcode, writing data to R0 */
561         retval = aarch64_exec_opcode(
562                         a8->armv8_common.arm.target,
563                         opcode,
564                         &dscr);
565         if (retval != ERROR_OK)
566                 return retval;
567
568         /* write R0 to DCC */
569         retval = aarch64_exec_opcode(
570                         a8->armv8_common.arm.target,
571                         0xd5130400,  /* msr dbgdtr_el0, x0 */
572                         &dscr);
573         if (retval != ERROR_OK)
574                 return retval;
575
576         return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
577 }
578
579 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
580         uint32_t addr, uint32_t control)
581 {
582         struct aarch64_common *a8 = dpm_to_a8(dpm);
583         uint32_t vr = a8->armv8_common.debug_base;
584         uint32_t cr = a8->armv8_common.debug_base;
585         int retval;
586
587         switch (index_t) {
588                 case 0 ... 15:  /* breakpoints */
589                         vr += CPUV8_DBG_BVR_BASE;
590                         cr += CPUV8_DBG_BCR_BASE;
591                         break;
592                 case 16 ... 31: /* watchpoints */
593                         vr += CPUV8_DBG_WVR_BASE;
594                         cr += CPUV8_DBG_WCR_BASE;
595                         index_t -= 16;
596                         break;
597                 default:
598                         return ERROR_FAIL;
599         }
600         vr += 4 * index_t;
601         cr += 4 * index_t;
602
603         LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
604                 (unsigned) vr, (unsigned) cr);
605
606         retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
607                         vr, addr);
608         if (retval != ERROR_OK)
609                 return retval;
610         retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
611                         cr, control);
612         return retval;
613 }
614
615 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
616 {
617         return ERROR_OK;
618
619 #if 0
620         struct aarch64_common *a = dpm_to_a8(dpm);
621         uint32_t cr;
622
623         switch (index_t) {
624                 case 0 ... 15:
625                         cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
626                         break;
627                 case 16 ... 31:
628                         cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
629                         index_t -= 16;
630                         break;
631                 default:
632                         return ERROR_FAIL;
633         }
634         cr += 4 * index_t;
635
636         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
637
638         /* clear control register */
639         return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
640 #endif
641 }
642
643 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
644 {
645         struct arm_dpm *dpm = &a8->armv8_common.dpm;
646         int retval;
647
648         dpm->arm = &a8->armv8_common.arm;
649         dpm->didr = debug;
650
651         dpm->prepare = aarch64_dpm_prepare;
652         dpm->finish = aarch64_dpm_finish;
653
654         dpm->instr_execute = aarch64_instr_execute;
655         dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
656         dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
657         dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
658         dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
659         dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
660
661         dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
662         dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
663         dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
664         dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
665
666         dpm->arm_reg_current = armv8_reg_current;
667
668         dpm->bpwp_enable = aarch64_bpwp_enable;
669         dpm->bpwp_disable = aarch64_bpwp_disable;
670
671         retval = armv8_dpm_setup(dpm);
672         if (retval == ERROR_OK)
673                 retval = armv8_dpm_initialize(dpm);
674
675         return retval;
676 }
677 static struct target *get_aarch64(struct target *target, int32_t coreid)
678 {
679         struct target_list *head;
680         struct target *curr;
681
682         head = target->head;
683         while (head != (struct target_list *)NULL) {
684                 curr = head->target;
685                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
686                         return curr;
687                 head = head->next;
688         }
689         return target;
690 }
691 static int aarch64_halt(struct target *target);
692
693 static int aarch64_halt_smp(struct target *target)
694 {
695         int retval = 0;
696         struct target_list *head;
697         struct target *curr;
698         head = target->head;
699         while (head != (struct target_list *)NULL) {
700                 curr = head->target;
701                 if ((curr != target) && (curr->state != TARGET_HALTED))
702                         retval += aarch64_halt(curr);
703                 head = head->next;
704         }
705         return retval;
706 }
707
708 static int update_halt_gdb(struct target *target)
709 {
710         int retval = 0;
711         if (target->gdb_service && target->gdb_service->core[0] == -1) {
712                 target->gdb_service->target = target;
713                 target->gdb_service->core[0] = target->coreid;
714                 retval += aarch64_halt_smp(target);
715         }
716         return retval;
717 }
718
719 /*
720  * AArch64 Run control
721  */
722
723 static int aarch64_poll(struct target *target)
724 {
725         int retval = ERROR_OK;
726         uint32_t dscr;
727         struct aarch64_common *aarch64 = target_to_aarch64(target);
728         struct armv8_common *armv8 = &aarch64->armv8_common;
729         enum target_state prev_target_state = target->state;
730         /*  toggling to another core is done by gdb as follows */
731         /*  maint packet J core_id */
732         /*  continue */
733         /*  the next poll triggers a halt event sent to gdb */
734         if ((target->state == TARGET_HALTED) && (target->smp) &&
735                 (target->gdb_service) &&
736                 (target->gdb_service->target == NULL)) {
737                 target->gdb_service->target =
738                         get_aarch64(target, target->gdb_service->core[1]);
739                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
740                 return retval;
741         }
742         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
743                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
744         if (retval != ERROR_OK)
745                 return retval;
746         aarch64->cpudbg_dscr = dscr;
747
748         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
749                 if (prev_target_state != TARGET_HALTED) {
750                         /* We have a halting debug event */
751                         LOG_DEBUG("Target halted");
752                         target->state = TARGET_HALTED;
753                         if ((prev_target_state == TARGET_RUNNING)
754                                 || (prev_target_state == TARGET_UNKNOWN)
755                                 || (prev_target_state == TARGET_RESET)) {
756                                 retval = aarch64_debug_entry(target);
757                                 if (retval != ERROR_OK)
758                                         return retval;
759                                 if (target->smp) {
760                                         retval = update_halt_gdb(target);
761                                         if (retval != ERROR_OK)
762                                                 return retval;
763                                 }
764                                 target_call_event_callbacks(target,
765                                         TARGET_EVENT_HALTED);
766                         }
767                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
768                                 LOG_DEBUG(" ");
769
770                                 retval = aarch64_debug_entry(target);
771                                 if (retval != ERROR_OK)
772                                         return retval;
773                                 if (target->smp) {
774                                         retval = update_halt_gdb(target);
775                                         if (retval != ERROR_OK)
776                                                 return retval;
777                                 }
778
779                                 target_call_event_callbacks(target,
780                                         TARGET_EVENT_DEBUG_HALTED);
781                         }
782                 }
783         } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
784                 target->state = TARGET_RUNNING;
785         else {
786                 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
787                 target->state = TARGET_UNKNOWN;
788         }
789
790         return retval;
791 }
792
793 static int aarch64_halt(struct target *target)
794 {
795         int retval = ERROR_OK;
796         uint32_t dscr;
797         struct armv8_common *armv8 = target_to_armv8(target);
798
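        /* The accesses below use magic offsets from debug_base + 0x10000, which is
         * presumably the per-core CTI in the usual 64KB-per-component CoreSight layout
         * (CTICONTROL 0x000, CTIAPPPULSE 0x01c, CTIINEN0 0x020, CTIOUTENn 0x0a0 + 4n,
         * CTIGATE 0x140): they appear to route a channel event to the core's halt request. */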
799         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
800                         armv8->debug_base + 0x10000 + 0, &dscr);
801         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
802                         armv8->debug_base + 0x10000 + 0, 1);
803         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
804                         armv8->debug_base + 0x10000 + 0, &dscr);
805
806         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
807                         armv8->debug_base + 0x10000 + 0x140, &dscr);
808         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
809                         armv8->debug_base + 0x10000 + 0x140, 6);
810         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
811                         armv8->debug_base + 0x10000 + 0x140, &dscr);
812
813         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
814                         armv8->debug_base + 0x10000 + 0xa0, &dscr);
815         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
816                         armv8->debug_base + 0x10000 + 0xa0, 5);
817         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
818                         armv8->debug_base + 0x10000 + 0xa0, &dscr);
819
820         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
821                         armv8->debug_base + 0x10000 + 0xa4, &dscr);
822         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
823                         armv8->debug_base + 0x10000 + 0xa4, 2);
824         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
825                         armv8->debug_base + 0x10000 + 0xa4, &dscr);
826
827         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
828                         armv8->debug_base + 0x10000 + 0x20, &dscr);
829         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
830                         armv8->debug_base + 0x10000 + 0x20, 4);
831         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
832                         armv8->debug_base + 0x10000 + 0x20, &dscr);
833
834         /*
835          * enter halting debug mode
836          */
837         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
838                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
839         if (retval != ERROR_OK)
840                 return retval;
841
842         /* STATUS */
843         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
844                         armv8->debug_base + 0x10000 + 0x134, &dscr);
845
846         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
847                         armv8->debug_base + 0x10000 + 0x1c, &dscr);
848         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
849                         armv8->debug_base + 0x10000 + 0x1c, 1);
850         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
851                         armv8->debug_base + 0x10000 + 0x1c, &dscr);
852
853
854         long long then = timeval_ms();
855         for (;; ) {
856                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
857                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
858                 if (retval != ERROR_OK)
859                         return retval;
860                 if ((dscr & DSCR_CORE_HALTED) != 0)
861                         break;
862                 if (timeval_ms() > then + 1000) {
863                         LOG_ERROR("Timeout waiting for halt");
864                         return ERROR_FAIL;
865                 }
866         }
867
868         target->debug_reason = DBG_REASON_DBGRQ;
869
870         return ERROR_OK;
871 }
872
873 static int aarch64_internal_restore(struct target *target, int current,
874         uint64_t *address, int handle_breakpoints, int debug_execution)
875 {
876         struct armv8_common *armv8 = target_to_armv8(target);
877         struct arm *arm = &armv8->arm;
878         int retval;
879         uint64_t resume_pc;
880
881         if (!debug_execution)
882                 target_free_all_working_areas(target);
883
884         /* current = 1: continue on current pc, otherwise continue at <address> */
885         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
886         if (!current)
887                 resume_pc = *address;
888         else
889                 *address = resume_pc;
890
891         /* Make sure that the Armv7 gdb thumb fixups do not
892          * kill the return address
893          */
894         switch (arm->core_state) {
895                 case ARM_STATE_ARM:
896                         resume_pc &= 0xFFFFFFFC;
897                         break;
898                 case ARM_STATE_AARCH64:
899                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
900                         break;
901                 case ARM_STATE_THUMB:
902                 case ARM_STATE_THUMB_EE:
903                         /* When the return address is loaded into PC
904                          * bit 0 must be 1 to stay in Thumb state
905                          */
906                         resume_pc |= 0x1;
907                         break;
908                 case ARM_STATE_JAZELLE:
909                         LOG_ERROR("How do I resume into Jazelle state??");
910                         return ERROR_FAIL;
911         }
912         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
913         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
914         arm->pc->dirty = 1;
915         arm->pc->valid = 1;
916         dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
917
918         /* call it now, before restoring the context, because it uses CPU
919          * register r0 to restore the system control register */
920         retval = aarch64_restore_system_control_reg(target);
921         if (retval != ERROR_OK)
922                 return retval;
923         retval = aarch64_restore_context(target, handle_breakpoints);
924         if (retval != ERROR_OK)
925                 return retval;
926         target->debug_reason = DBG_REASON_NOTHALTED;
927         target->state = TARGET_RUNNING;
928
929         /* registers are now invalid */
930         register_cache_invalidate(arm->core_cache);
931
932 #if 0
933         /* the front-end may request us not to handle breakpoints */
934         if (handle_breakpoints) {
935                 /* Single step past breakpoint at current address */
936                 breakpoint = breakpoint_find(target, resume_pc);
937                 if (breakpoint) {
938                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
939                         cortex_m3_unset_breakpoint(target, breakpoint);
940                         cortex_m3_single_step_core(target);
941                         cortex_m3_set_breakpoint(target, breakpoint);
942                 }
943         }
944 #endif
945
946         return retval;
947 }
948
949 static int aarch64_internal_restart(struct target *target)
950 {
951         struct armv8_common *armv8 = target_to_armv8(target);
952         struct arm *arm = &armv8->arm;
953         int retval;
954         uint32_t dscr;
955         /*
956          * Restart core and wait for it to be started.  Clear ITRen and sticky
957          * exception flags: see ARMv7 ARM, C5.9.
958          *
959          * REVISIT: for single stepping, we probably want to
960          * disable IRQs by default, with optional override...
961          */
962
963         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
964                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
965         if (retval != ERROR_OK)
966                 return retval;
967
968         if ((dscr & DSCR_ITE) == 0)
969                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
970
971         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
972                         armv8->debug_base + CPUV8_DBG_DSCR, dscr & ~DSCR_ITR_EN);
973         if (retval != ERROR_OK)
974                 return retval;
975
976         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
977                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_RESTART |
978                         DRCR_CLEAR_EXCEPTIONS);
979         if (retval != ERROR_OK)
980                 return retval;
981
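        /* Presumably the CTI again: acknowledge the halt trigger (CTIINTACK, 0x10)
         * and pulse channel 1 (CTIAPPPULSE, 0x1c) to request a restart. */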
982         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
983                         armv8->debug_base + 0x10000 + 0x10, 1);
984         if (retval != ERROR_OK)
985                 return retval;
986
987         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
988                         armv8->debug_base + 0x10000 + 0x1c, 2);
989         if (retval != ERROR_OK)
990                 return retval;
991
992         long long then = timeval_ms();
993         for (;; ) {
994                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
995                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
996                 if (retval != ERROR_OK)
997                         return retval;
998                 if ((dscr & DSCR_CORE_RESTARTED) != 0)
999                         break;
1000                 if (timeval_ms() > then + 1000) {
1001                         LOG_ERROR("Timeout waiting for resume");
1002                         return ERROR_FAIL;
1003                 }
1004         }
1005
1006         target->debug_reason = DBG_REASON_NOTHALTED;
1007         target->state = TARGET_RUNNING;
1008
1009         /* registers are now invalid */
1010         register_cache_invalidate(arm->core_cache);
1011
1012         return ERROR_OK;
1013 }
1014
1015 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1016 {
1017         int retval = 0;
1018         struct target_list *head;
1019         struct target *curr;
1020         uint64_t address;
1021         head = target->head;
1022         while (head != (struct target_list *)NULL) {
1023                 curr = head->target;
1024                 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1025                         /*  resume at the current address, not in step mode */
1026                         retval += aarch64_internal_restore(curr, 1, &address,
1027                                         handle_breakpoints, 0);
1028                         retval += aarch64_internal_restart(curr);
1029                 }
1030                 head = head->next;
1031
1032         }
1033         return retval;
1034 }
1035
1036 static int aarch64_resume(struct target *target, int current,
1037         target_addr_t address, int handle_breakpoints, int debug_execution)
1038 {
1039         int retval = 0;
1040         uint64_t addr = address;
1041
1042         /* dummy resume for smp toggle in order to reduce gdb impact  */
1043         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1044                 /*   simulate a start and halt of target */
1045                 target->gdb_service->target = NULL;
1046                 target->gdb_service->core[0] = target->gdb_service->core[1];
1047                 /*  fake resume; at the next poll we present target core[1] as halted, see poll */
1048                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1049                 return 0;
1050         }
1051         aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1052                                  debug_execution);
1053         if (target->smp) {
1054                 target->gdb_service->core[0] = -1;
1055                 retval = aarch64_restore_smp(target, handle_breakpoints);
1056                 if (retval != ERROR_OK)
1057                         return retval;
1058         }
1059         aarch64_internal_restart(target);
1060
1061         if (!debug_execution) {
1062                 target->state = TARGET_RUNNING;
1063                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1064                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1065         } else {
1066                 target->state = TARGET_DEBUG_RUNNING;
1067                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1068                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1069         }
1070
1071         return ERROR_OK;
1072 }
1073
1074 static int aarch64_debug_entry(struct target *target)
1075 {
1076         uint32_t dscr;
1077         int retval = ERROR_OK;
1078         struct aarch64_common *aarch64 = target_to_aarch64(target);
1079         struct armv8_common *armv8 = target_to_armv8(target);
1080         uint32_t tmp;
1081
1082         LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1083
1084         /* REVISIT surely we should not re-read DSCR !! */
1085         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1086                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1087         if (retval != ERROR_OK)
1088                 return retval;
1089
1090         /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1091          * imprecise data aborts get discarded by issuing a Data
1092          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1093          */
1094
1095         /* Enable the ITR execution once we are in debug mode */
1096         dscr |= DSCR_ITR_EN;
1097         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1098                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1099         if (retval != ERROR_OK)
1100                 return retval;
1101
1102         /* Examine debug reason */
1103         arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1104         mem_ap_read_atomic_u32(armv8->debug_ap,
1105                                    armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
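        /* EDESR bit 2 (SS) set means a halting-step debug event is pending */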
1106         if ((tmp & 0x7) == 0x4)
1107                 target->debug_reason = DBG_REASON_SINGLESTEP;
1108
1109         /* save address of instruction that triggered the watchpoint? */
1110         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1111                 uint32_t wfar;
1112
1113                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1114                                 armv8->debug_base + CPUV8_DBG_WFAR0,
1115                                 &wfar);
1116                 if (retval != ERROR_OK)
1117                         return retval;
1118                 arm_dpm_report_wfar(&armv8->dpm, wfar);
1119         }
1120
1121         retval = armv8_dpm_read_current_registers(&armv8->dpm);
1122
1123         if (armv8->post_debug_entry) {
1124                 retval = armv8->post_debug_entry(target);
1125                 if (retval != ERROR_OK)
1126                         return retval;
1127         }
1128
1129         return retval;
1130 }
1131
1132 static int aarch64_post_debug_entry(struct target *target)
1133 {
1134         struct aarch64_common *aarch64 = target_to_aarch64(target);
1135         struct armv8_common *armv8 = &aarch64->armv8_common;
1136         struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1137         uint32_t sctlr_el1 = 0;
1138         int retval;
1139
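        /* EDRCR.CSE (bit 2): clear the sticky error status before issuing instructions */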
1140         mem_ap_write_atomic_u32(armv8->debug_ap,
1141                                 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
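        /* 0xd5381000 = MRS X0, SCTLR_EL1: read the current system control register */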
1142         retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1143                                             0xd5381000, &sctlr_el1);
1144         if (retval != ERROR_OK)
1145                 return retval;
1146
1147         LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1148         aarch64->system_control_reg = sctlr_el1;
1149         aarch64->system_control_reg_curr = sctlr_el1;
1150         aarch64->curr_mode = armv8->arm.core_mode;
1151
1152         armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1153         armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1154         armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1155
1156 #if 0
1157         if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1158                 armv8_identify_cache(target);
1159 #endif
1160
1161         return ERROR_OK;
1162 }
1163
1164 static int aarch64_step(struct target *target, int current, target_addr_t address,
1165         int handle_breakpoints)
1166 {
1167         struct armv8_common *armv8 = target_to_armv8(target);
1168         int retval;
1169         uint32_t tmp;
1170
1171         if (target->state != TARGET_HALTED) {
1172                 LOG_WARNING("target not halted");
1173                 return ERROR_TARGET_NOT_HALTED;
1174         }
1175
1176         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1177                         armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1178         if (retval != ERROR_OK)
1179                 return retval;
1180
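        /* set EDECR.SS (bit 2) to enable halting-step for the next resume */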
1181         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1182                         armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1183         if (retval != ERROR_OK)
1184                 return retval;
1185
1186         target->debug_reason = DBG_REASON_SINGLESTEP;
1187         retval = aarch64_resume(target, 1, address, 0, 0);
1188         if (retval != ERROR_OK)
1189                 return retval;
1190
1191         long long then = timeval_ms();
1192         while (target->state != TARGET_HALTED) {
1193                 mem_ap_read_atomic_u32(armv8->debug_ap,
1194                         armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1195                 LOG_DEBUG("EDESR = %#x", tmp);
1196                 retval = aarch64_poll(target);
1197                 if (retval != ERROR_OK)
1198                         return retval;
1199                 if (timeval_ms() > then + 1000) {
1200                         LOG_ERROR("timeout waiting for target halt");
1201                         return ERROR_FAIL;
1202                 }
1203         }
1204
1205         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1206                         armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1207         if (retval != ERROR_OK)
1208                 return retval;
1209
1210         target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1211         if (target->state == TARGET_HALTED)
1212                 LOG_DEBUG("target stepped");
1213
1214         return ERROR_OK;
1215 }
1216
1217 static int aarch64_restore_context(struct target *target, bool bpwp)
1218 {
1219         struct armv8_common *armv8 = target_to_armv8(target);
1220
1221         LOG_DEBUG(" ");
1222
1223         if (armv8->pre_restore_context)
1224                 armv8->pre_restore_context(target);
1225
1226         return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1227
1228 }
1229
1230 /*
1231  * AArch64 Breakpoint and watchpoint functions
1232  */
1233
1234 /* Setup hardware Breakpoint Register Pair */
1235 static int aarch64_set_breakpoint(struct target *target,
1236         struct breakpoint *breakpoint, uint8_t matchmode)
1237 {
1238         int retval;
1239         int brp_i = 0;
1240         uint32_t control;
1241         uint8_t byte_addr_select = 0x0F;
1242         struct aarch64_common *aarch64 = target_to_aarch64(target);
1243         struct armv8_common *armv8 = &aarch64->armv8_common;
1244         struct aarch64_brp *brp_list = aarch64->brp_list;
1245         uint32_t dscr;
1246
1247         if (breakpoint->set) {
1248                 LOG_WARNING("breakpoint already set");
1249                 return ERROR_OK;
1250         }
1251
1252         if (breakpoint->type == BKPT_HARD) {
1253                 int64_t bpt_value;
1254                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1255                         brp_i++;
1256                 if (brp_i >= aarch64->brp_num) {
1257                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1258                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1259                 }
1260                 breakpoint->set = brp_i + 1;
1261                 if (breakpoint->length == 2)
1262                         byte_addr_select = (3 << (breakpoint->address & 0x02));
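                /* DBGBCR fields: BT[23:20] = matchmode, HMC (bit 13) = 1,
                 * BAS = byte_addr_select, PMC[2:1] = 0b11, E (bit 0) = 1 */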
1263                 control = ((matchmode & 0x7) << 20)
1264                         | (1 << 13)
1265                         | (byte_addr_select << 5)
1266                         | (3 << 1) | 1;
1267                 brp_list[brp_i].used = 1;
1268                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1269                 brp_list[brp_i].control = control;
1270                 bpt_value = brp_list[brp_i].value;
1271
1272                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1273                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1274                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1275                 if (retval != ERROR_OK)
1276                         return retval;
1277                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1278                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1279                                 (uint32_t)(bpt_value >> 32));
1280                 if (retval != ERROR_OK)
1281                         return retval;
1282
1283                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1284                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1285                                 brp_list[brp_i].control);
1286                 if (retval != ERROR_OK)
1287                         return retval;
1288                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1289                         brp_list[brp_i].control,
1290                         brp_list[brp_i].value);
1291
1292         } else if (breakpoint->type == BKPT_SOFT) {
1293                 uint8_t code[4];
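                /* 0xD4400000 = HLT #0, the AArch64 halting breakpoint instruction */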
1294                 buf_set_u32(code, 0, 32, 0xD4400000);
1295
1296                 retval = target_read_memory(target,
1297                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1298                                 breakpoint->length, 1,
1299                                 breakpoint->orig_instr);
1300                 if (retval != ERROR_OK)
1301                         return retval;
1302                 retval = target_write_memory(target,
1303                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1304                                 breakpoint->length, 1, code);
1305                 if (retval != ERROR_OK)
1306                         return retval;
1307                 breakpoint->set = 0x11; /* Any nice value but 0 */
1308         }
1309
1310         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1311                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1312         /* Ensure that halting debug mode is enabled */
1313         dscr = dscr | DSCR_HDE;
1314         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1315                                          armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1316         if (retval != ERROR_OK) {
1317                 LOG_DEBUG("Failed to set DSCR.HDE");
1318                 return retval;
1319         }
1320
1321         return ERROR_OK;
1322 }
1323
1324 static int aarch64_set_context_breakpoint(struct target *target,
1325         struct breakpoint *breakpoint, uint8_t matchmode)
1326 {
1327         int retval = ERROR_FAIL;
1328         int brp_i = 0;
1329         uint32_t control;
1330         uint8_t byte_addr_select = 0x0F;
1331         struct aarch64_common *aarch64 = target_to_aarch64(target);
1332         struct armv8_common *armv8 = &aarch64->armv8_common;
1333         struct aarch64_brp *brp_list = aarch64->brp_list;
1334
1335         if (breakpoint->set) {
1336                 LOG_WARNING("breakpoint already set");
1337                 return retval;
1338         }
1339         /* check for an available context-matching BRP */
1340         while ((brp_i < aarch64->brp_num) &&
1341                 (brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
1342                 brp_i++;
1343
1344         if (brp_i >= aarch64->brp_num) {
1345                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1346                 return ERROR_FAIL;
1347         }
1348
1349         breakpoint->set = brp_i + 1;
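        /* for a context breakpoint the BVR holds the context ID (ASID) and
         * matchmode selects a Context ID comparison via DBGBCR.BT */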
1350         control = ((matchmode & 0x7) << 20)
1351                 | (1 << 13)
1352                 | (byte_addr_select << 5)
1353                 | (3 << 1) | 1;
1354         brp_list[brp_i].used = 1;
1355         brp_list[brp_i].value = (breakpoint->asid);
1356         brp_list[brp_i].control = control;
1357         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1358                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1359                         brp_list[brp_i].value);
1360         if (retval != ERROR_OK)
1361                 return retval;
1362         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1363                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1364                         brp_list[brp_i].control);
1365         if (retval != ERROR_OK)
1366                 return retval;
1367         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1368                 brp_list[brp_i].control,
1369                 brp_list[brp_i].value);
1370         return ERROR_OK;
1371
1372 }
1373
1374 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1375 {
1376         int retval = ERROR_FAIL;
1377         int brp_1 = 0;  /* holds the contextID pair */
1378         int brp_2 = 0;  /* holds the IVA pair */
1379         uint32_t control_CTX, control_IVA;
1380         uint8_t CTX_byte_addr_select = 0x0F;
1381         uint8_t IVA_byte_addr_select = 0x0F;
1382         uint8_t CTX_machmode = 0x03;
1383         uint8_t IVA_machmode = 0x01;
1384         struct aarch64_common *aarch64 = target_to_aarch64(target);
1385         struct armv8_common *armv8 = &aarch64->armv8_common;
1386         struct aarch64_brp *brp_list = aarch64->brp_list;
1387
1388         if (breakpoint->set) {
1389                 LOG_WARNING("breakpoint already set");
1390                 return retval;
1391         }
1392         /* check available context BRPs */
1393         while ((brp_list[brp_1].used ||
1394                 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1395                 brp_1++;
1396
1397         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1398         if (brp_1 >= aarch64->brp_num) {
1399                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1400                 return ERROR_FAIL;
1401         }
1402
1403         while ((brp_list[brp_2].used ||
1404                 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1405                 brp_2++;
1406
1407         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1408         if (brp_2 >= aarch64->brp_num) {
1409                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1410                 return ERROR_FAIL;
1411         }
1412
1413         breakpoint->set = brp_1 + 1;
1414         breakpoint->linked_BRP = brp_2;
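        /* context-matching BCR: LBN[19:16] links this BRP to the IVA BRP (brp_2);
         * field layout assumed as in aarch64_set_context_breakpoint() above */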
1415         control_CTX = ((CTX_machmode & 0x7) << 20)
1416                 | (brp_2 << 16)
1417                 | (0 << 14)
1418                 | (CTX_byte_addr_select << 5)
1419                 | (3 << 1) | 1;
1420         brp_list[brp_1].used = 1;
1421         brp_list[brp_1].value = (breakpoint->asid);
1422         brp_list[brp_1].control = control_CTX;
1423         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1424                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1425                         brp_list[brp_1].value);
1426         if (retval != ERROR_OK)
1427                 return retval;
1428         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1429                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1430                         brp_list[brp_1].control);
1431         if (retval != ERROR_OK)
1432                 return retval;
1433
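        /* address-matching BCR: linked back to the context BRP via LBN[19:16],
         * with HMC[13] set and BT[23:20] = IVA_machmode for a linked address match */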
1434         control_IVA = ((IVA_machmode & 0x7) << 20)
1435                 | (brp_1 << 16)
1436                 | (1 << 13)
1437                 | (IVA_byte_addr_select << 5)
1438                 | (3 << 1) | 1;
1439         brp_list[brp_2].used = 1;
1440         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1441         brp_list[brp_2].control = control_IVA;
1442         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1443                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1444                         brp_list[brp_2].value & 0xFFFFFFFF);
1445         if (retval != ERROR_OK)
1446                 return retval;
1447         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1448                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1449                         brp_list[brp_2].value >> 32);
1450         if (retval != ERROR_OK)
1451                 return retval;
1452         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1453                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1454                         brp_list[brp_2].control);
1455         if (retval != ERROR_OK)
1456                 return retval;
1457
1458         return ERROR_OK;
1459 }
1460
1461 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1462 {
1463         int retval;
1464         struct aarch64_common *aarch64 = target_to_aarch64(target);
1465         struct armv8_common *armv8 = &aarch64->armv8_common;
1466         struct aarch64_brp *brp_list = aarch64->brp_list;
1467
1468         if (!breakpoint->set) {
1469                 LOG_WARNING("breakpoint not set");
1470                 return ERROR_OK;
1471         }
1472
1473         if (breakpoint->type == BKPT_HARD) {
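                /* a breakpoint with both address and ASID set is a hybrid one;
                 * clear both the context BRP and its linked IVA BRP */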
1474                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1475                         int brp_i = breakpoint->set - 1;
1476                         int brp_j = breakpoint->linked_BRP;
1477                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1478                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1479                                 return ERROR_OK;
1480                         }
1481                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1482                                 brp_list[brp_i].control, brp_list[brp_i].value);
1483                         brp_list[brp_i].used = 0;
1484                         brp_list[brp_i].value = 0;
1485                         brp_list[brp_i].control = 0;
1486                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1487                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1488                                         brp_list[brp_i].control);
1489                         if (retval != ERROR_OK)
1490                                 return retval;
1491                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1492                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1493                                 return ERROR_OK;
1494                         }
1495                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1496                                 brp_list[brp_j].control, brp_list[brp_j].value);
1497                         brp_list[brp_j].used = 0;
1498                         brp_list[brp_j].value = 0;
1499                         brp_list[brp_j].control = 0;
1500                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1501                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1502                                         brp_list[brp_j].control);
1503                         if (retval != ERROR_OK)
1504                                 return retval;
1505                         breakpoint->linked_BRP = 0;
1506                         breakpoint->set = 0;
1507                         return ERROR_OK;
1508
1509                 } else {
1510                         int brp_i = breakpoint->set - 1;
1511                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1512                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1513                                 return ERROR_OK;
1514                         }
1515                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1516                                 brp_list[brp_i].control, brp_list[brp_i].value);
1517                         brp_list[brp_i].used = 0;
1518                         brp_list[brp_i].value = 0;
1519                         brp_list[brp_i].control = 0;
1520                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1521                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1522                                         brp_list[brp_i].control);
1523                         if (retval != ERROR_OK)
1524                                 return retval;
1525                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1526                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1527                                         brp_list[brp_i].value);
1528                         if (retval != ERROR_OK)
1529                                 return retval;
1530                         breakpoint->set = 0;
1531                         return ERROR_OK;
1532                 }
1533         } else {
1534                 /* restore original instruction (kept in target endianness) */
1535                 if (breakpoint->length == 4) {
1536                         retval = target_write_memory(target,
1537                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1538                                         4, 1, breakpoint->orig_instr);
1539                         if (retval != ERROR_OK)
1540                                 return retval;
1541                 } else {
1542                         retval = target_write_memory(target,
1543                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1544                                         2, 1, breakpoint->orig_instr);
1545                         if (retval != ERROR_OK)
1546                                 return retval;
1547                 }
1548         }
1549         breakpoint->set = 0;
1550
1551         return ERROR_OK;
1552 }
1553
1554 static int aarch64_add_breakpoint(struct target *target,
1555         struct breakpoint *breakpoint)
1556 {
1557         struct aarch64_common *aarch64 = target_to_aarch64(target);
1558
1559         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1560                 LOG_INFO("no hardware breakpoint available");
1561                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1562         }
1563
1564         if (breakpoint->type == BKPT_HARD)
1565                 aarch64->brp_num_available--;
1566
1567         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1568 }
1569
1570 static int aarch64_add_context_breakpoint(struct target *target,
1571         struct breakpoint *breakpoint)
1572 {
1573         struct aarch64_common *aarch64 = target_to_aarch64(target);
1574
1575         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1576                 LOG_INFO("no hardware breakpoint available");
1577                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1578         }
1579
1580         if (breakpoint->type == BKPT_HARD)
1581                 aarch64->brp_num_available--;
1582
1583         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1584 }
1585
1586 static int aarch64_add_hybrid_breakpoint(struct target *target,
1587         struct breakpoint *breakpoint)
1588 {
1589         struct aarch64_common *aarch64 = target_to_aarch64(target);
1590
1591         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1592                 LOG_INFO("no hardware breakpoint available");
1593                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1594         }
1595
1596         if (breakpoint->type == BKPT_HARD)
1597                 aarch64->brp_num_available--;
1598
1599         return aarch64_set_hybrid_breakpoint(target, breakpoint);      /* context-ID + IVA match */
1600 }
1601
1602
1603 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1604 {
1605         struct aarch64_common *aarch64 = target_to_aarch64(target);
1606
1607 #if 0
1608 /* It is perfectly possible to remove breakpoints while the target is running */
1609         if (target->state != TARGET_HALTED) {
1610                 LOG_WARNING("target not halted");
1611                 return ERROR_TARGET_NOT_HALTED;
1612         }
1613 #endif
1614
1615         if (breakpoint->set) {
1616                 aarch64_unset_breakpoint(target, breakpoint);
1617                 if (breakpoint->type == BKPT_HARD)
1618                         aarch64->brp_num_available++;
1619         }
1620
1621         return ERROR_OK;
1622 }
1623
1624 /*
1625  * AArch64 reset functions
1626  */
1627
1628 static int aarch64_assert_reset(struct target *target)
1629 {
1630         struct armv8_common *armv8 = target_to_armv8(target);
1631
1632         LOG_DEBUG(" ");
1633
1634         /* FIXME when halt is requested, make it work somehow... */
1635
1636         /* Issue some kind of warm reset. */
1637         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1638                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1639         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1640                 /* REVISIT handle "pulls" cases, if there's
1641                  * hardware that needs them to work.
1642                  */
1643                 jtag_add_reset(0, 1);
1644         } else {
1645                 LOG_ERROR("%s: how to reset?", target_name(target));
1646                 return ERROR_FAIL;
1647         }
1648
1649         /* registers are now invalid */
1650         register_cache_invalidate(armv8->arm.core_cache);
1651
1652         target->state = TARGET_RESET;
1653
1654         return ERROR_OK;
1655 }
1656
1657 static int aarch64_deassert_reset(struct target *target)
1658 {
1659         int retval;
1660
1661         LOG_DEBUG(" ");
1662
1663         /* be certain SRST is off */
1664         jtag_add_reset(0, 0);
1665
1666         retval = aarch64_poll(target);
1667         if (retval != ERROR_OK)
1668                 return retval;
1669
1670         if (target->reset_halt) {
1671                 if (target->state != TARGET_HALTED) {
1672                         LOG_WARNING("%s: ran after reset and before halt ...",
1673                                 target_name(target));
1674                         retval = target_halt(target);
1675                         if (retval != ERROR_OK)
1676                                 return retval;
1677                 }
1678         }
1679
1680         return ERROR_OK;
1681 }
1682
1683 static int aarch64_write_apb_ap_memory(struct target *target,
1684         uint64_t address, uint32_t size,
1685         uint32_t count, const uint8_t *buffer)
1686 {
1687         /* write memory through APB-AP */
1688         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1689         struct armv8_common *armv8 = target_to_armv8(target);
1690         struct arm *arm = &armv8->arm;
1691         int total_bytes = count * size;
1692         int total_u32;
1693         int start_byte = address & 0x3;
1694         int end_byte   = (address + total_bytes) & 0x3;
1695         struct reg *reg;
1696         uint32_t dscr;
1697         uint8_t *tmp_buff = NULL;
1698
1699         LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count %" PRIu32,
1700                           address, size, count);
1701         if (target->state != TARGET_HALTED) {
1702                 LOG_WARNING("target not halted");
1703                 return ERROR_TARGET_NOT_HALTED;
1704         }
1705
1706         total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1707
1708         /* Mark registers X0 and X1 as dirty, as they will be used
1709          * for transferring the data.
1710          * They will be restored automatically when exiting
1711          * debug mode.
1712          */
1713         reg = armv8_reg_current(arm, 1);
1714         reg->dirty = true;
1715
1716         reg = armv8_reg_current(arm, 0);
1717         reg->dirty = true;
1718
1719         /*  clear any abort  */
1720         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1721                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1722         if (retval != ERROR_OK)
1723                 return retval;
1724
1725
1726         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1727
1728         /* The algorithm only copies 32 bit words, so the buffer
1729          * should be expanded to include the words at either end.
1730          * The first and last words will be read first to avoid
1731          * corruption if needed.
1732          */
1733         tmp_buff = malloc(total_u32 * 4);
1734
1735         if ((start_byte != 0) && (total_u32 > 1)) {
1736                 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1737                  * the other bytes in the word.
1738                  */
1739                 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1740                 if (retval != ERROR_OK)
1741                         goto error_free_buff_w;
1742         }
1743
1744         /* If end of write is not aligned, or the write is less than 4 bytes */
1745         if ((end_byte != 0) ||
1746                 ((total_u32 == 1) && (total_bytes != 4))) {
1747
1748                 /* Read the last word to avoid corruption during 32 bit write */
1749                 int mem_offset = (total_u32-1) * 4;
1750                 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1751                 if (retval != ERROR_OK)
1752                         goto error_free_buff_w;
1753         }
1754
1755         /* Copy the write buffer over the top of the temporary buffer */
1756         memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1757
1758         /* We now have a 32 bit aligned buffer that can be written */
1759
1760         /* Read DSCR */
1761         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1762                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1763         if (retval != ERROR_OK)
1764                 goto error_free_buff_w;
1765
1766         /* Set Normal access mode  */
1767         dscr = (dscr & ~DSCR_MA);
1768         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1769                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1770
1771         if (arm->core_state == ARM_STATE_AARCH64) {
1772                 /* Write X0 with value 'address' using write procedure */
1773                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1774                 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1775                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1776                 retval += aarch64_exec_opcode(target,
1777                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1778         } else {
1779                 /* Write R0 with value 'address' using write procedure */
1780                 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1781                 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1782                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1783                 retval += aarch64_exec_opcode(target,
1784                                 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1785
1786         }
1787         /* Step 1.d   - Change DCC to memory mode */
1788         dscr = dscr | DSCR_MA;
1789         retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1790                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1791         if (retval != ERROR_OK)
1792                 goto error_unset_dtr_w;
1793
1794
1795         /* Step 2.a   - Do the write */
1796         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1797                                         tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1798         if (retval != ERROR_OK)
1799                 goto error_unset_dtr_w;
1800
1801         /* Step 3.a   - Switch DTR mode back to Normal mode */
1802         dscr = (dscr & ~DSCR_MA);
1803         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1804                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1805         if (retval != ERROR_OK)
1806                 goto error_unset_dtr_w;
1807
1808         /* Check for sticky abort flags in the DSCR */
1809         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1810                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1811         if (retval != ERROR_OK)
1812                 goto error_free_buff_w;
1813         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1814                 /* Abort occurred - clear it and exit */
1815                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1816                 mem_ap_write_atomic_u32(armv8->debug_ap,
1817                                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1818                 goto error_free_buff_w;
1819         }
1820
1821         /* Done */
1822         free(tmp_buff);
1823         return ERROR_OK;
1824
1825 error_unset_dtr_w:
1826         /* Unset DTR mode */
1827         mem_ap_read_atomic_u32(armv8->debug_ap,
1828                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1829         dscr = (dscr & ~DSCR_MA);
1830         mem_ap_write_atomic_u32(armv8->debug_ap,
1831                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1832 error_free_buff_w:
1833         LOG_ERROR("error");
1834         free(tmp_buff);
1835         return ERROR_FAIL;
1836 }
1837
1838 static int aarch64_read_apb_ap_memory(struct target *target,
1839         target_addr_t address, uint32_t size,
1840         uint32_t count, uint8_t *buffer)
1841 {
1842         /* read memory through APB-AP */
1843         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1844         struct armv8_common *armv8 = target_to_armv8(target);
1845         struct arm *arm = &armv8->arm;
1846         int total_bytes = count * size;
1847         int total_u32;
1848         int start_byte = address & 0x3;
1849         int end_byte   = (address + total_bytes) & 0x3;
1850         struct reg *reg;
1851         uint32_t dscr;
1852         uint8_t *tmp_buff = NULL;
1853         uint8_t *u8buf_ptr;
1854         uint32_t value;
1855
1856         LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1857                           address, size, count);
1858         if (target->state != TARGET_HALTED) {
1859                 LOG_WARNING("target not halted");
1860                 return ERROR_TARGET_NOT_HALTED;
1861         }
1862
1863         total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1864         /* Mark registers X0 and X1 as dirty, as they will be used
1865          * for transferring the data.
1866          * They will be restored automatically when exiting
1867          * debug mode.
1868          */
1869         reg = armv8_reg_current(arm, 1);
1870         reg->dirty = true;
1871
1872         reg = armv8_reg_current(arm, 0);
1873         reg->dirty = true;
1874
1875         /*      clear any abort  */
1876         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1877                                 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1878         if (retval != ERROR_OK)
1879                 goto error_free_buff_r;
1880
1881         /* Read DSCR */
1882         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1883                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1884
1885         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1886
1887         /* Set Normal access mode  */
1888         dscr = (dscr & ~DSCR_MA);
1889         retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1890                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1891
1892         if (arm->core_state == ARM_STATE_AARCH64) {
1893                 /* Write X0 with value 'address' using write procedure */
1894                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1895                 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1896                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1897                 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1898                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1899                 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1900                 /* Step 1.e - Change DCC to memory mode */
1901                 dscr = dscr | DSCR_MA;
1902                 retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1903                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1904                 /* Step 1.f - read DBGDTRTX and discard the value */
1905                 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1906                                 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1907         } else {
1908                 /* Write R0 with value 'address' using write procedure */
1909                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1910                 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1911                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1912                 retval += aarch64_exec_opcode(target,
1913                                 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1914                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1915                 retval += aarch64_exec_opcode(target,
1916                                 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
1917                 /* Step 1.e - Change DCC to memory mode */
1918                 dscr = dscr | DSCR_MA;
1919                 retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1920                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1921                 /* Step 1.f - read DBGDTRTX and discard the value */
1922                 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1923                                 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1924
1925         }
1926         if (retval != ERROR_OK)
1927                 goto error_unset_dtr_r;
1928
1929         /* Optimize the read as much as we can, either way we read in a single pass  */
1930         if ((start_byte) || (end_byte)) {
1931                 /* The algorithm only copies 32 bit words, so the buffer
1932                  * should be expanded to include the words at either end.
1933                  * The first and last words will be read into a temp buffer
1934                  * to avoid corruption
1935                  */
1936                 tmp_buff = malloc(total_u32 * 4);
1937                 if (!tmp_buff)
1938                         goto error_unset_dtr_r;
1939
1940                 /* use the tmp buffer to read the entire data */
1941                 u8buf_ptr = tmp_buff;
1942         } else
1943                 /* address and read length are aligned so read directly into the passed buffer */
1944                 u8buf_ptr = buffer;
1945
1946         /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1947          * Abort flags are sticky, so can be read at end of transactions
1948          *
1949          * This data is read in aligned to 32 bit boundary.
1950          */
1951
1952         /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1953          * increments X0 by 4. */
1954         retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
1955                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
1956         if (retval != ERROR_OK)
1957                         goto error_unset_dtr_r;
1958
1959         /* Step 3.a - set DTR access mode back to Normal mode   */
1960         dscr = (dscr & ~DSCR_MA);
1961         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1962                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1963         if (retval != ERROR_OK)
1964                 goto error_free_buff_r;
1965
1966         /* Step 3.b - read DBGDTRTX for the final value */
1967         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1968                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1969         memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
1970
1971         /* Check for sticky abort flags in the DSCR */
1972         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1973                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1974         if (retval != ERROR_OK)
1975                 goto error_free_buff_r;
1976         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1977                 /* Abort occurred - clear it and exit */
1978                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1979                 mem_ap_write_atomic_u32(armv8->debug_ap,
1980                                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1981                 goto error_free_buff_r;
1982         }
1983
1984         /* check if we need to copy aligned data by applying any shift necessary */
1985         if (tmp_buff) {
1986                 memcpy(buffer, tmp_buff + start_byte, total_bytes);
1987                 free(tmp_buff);
1988         }
1989
1990         /* Done */
1991         return ERROR_OK;
1992
1993 error_unset_dtr_r:
1994         /* Unset DTR mode */
1995         mem_ap_read_atomic_u32(armv8->debug_ap,
1996                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1997         dscr = (dscr & ~DSCR_MA);
1998         mem_ap_write_atomic_u32(armv8->debug_ap,
1999                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2000 error_free_buff_r:
2001         LOG_ERROR("error");
2002         free(tmp_buff);
2003         return ERROR_FAIL;
2004 }
2005
2006 static int aarch64_read_phys_memory(struct target *target,
2007         target_addr_t address, uint32_t size,
2008         uint32_t count, uint8_t *buffer)
2009 {
2010         struct armv8_common *armv8 = target_to_armv8(target);
2011         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2012         struct adiv5_dap *swjdp = armv8->arm.dap;
2013         uint8_t apsel = swjdp->apsel;
2014         LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2015                 address, size, count);
2016
2017         if (count && buffer) {
2018
2019                 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2020
2021                         /* read memory through AHB-AP */
2022                         retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2023                 } else {
2024                         /* read memory through APB-AP */
2025                         retval = aarch64_mmu_modify(target, 0);
2026                         if (retval != ERROR_OK)
2027                                 return retval;
2028                         retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2029                 }
2030         }
2031         return retval;
2032 }
2033
2034 static int aarch64_read_memory(struct target *target, target_addr_t address,
2035         uint32_t size, uint32_t count, uint8_t *buffer)
2036 {
2037         int mmu_enabled = 0;
2038         target_addr_t virt, phys;
2039         int retval;
2040         struct armv8_common *armv8 = target_to_armv8(target);
2041         struct adiv5_dap *swjdp = armv8->arm.dap;
2042         uint8_t apsel = swjdp->apsel;
2043
2044         /* aarch64 handles unaligned memory access */
2045         LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2046                 size, count);
2047
2048         /* determine if MMU was enabled on target stop */
2049         if (!armv8->is_armv7r) {
2050                 retval = aarch64_mmu(target, &mmu_enabled);
2051                 if (retval != ERROR_OK)
2052                         return retval;
2053         }
2054
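        /* prefer the memory AP (direct bus access) when it is available and selected;
         * otherwise fall back to reading through the core via the APB-AP */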
2055         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2056                 if (mmu_enabled) {
2057                         virt = address;
2058                         retval = aarch64_virt2phys(target, virt, &phys);
2059                         if (retval != ERROR_OK)
2060                                 return retval;
2061
2062                         LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2063                                   virt, phys);
2064                         address = phys;
2065                 }
2066                 retval = aarch64_read_phys_memory(target, address, size, count,
2067                                                   buffer);
2068         } else {
2069                 if (mmu_enabled) {
2070                         retval = aarch64_check_address(target, address);
2071                         if (retval != ERROR_OK)
2072                                 return retval;
2073                         /* enable MMU as we could have disabled it for phys
2074                            access */
2075                         retval = aarch64_mmu_modify(target, 1);
2076                         if (retval != ERROR_OK)
2077                                 return retval;
2078                 }
2079                 retval = aarch64_read_apb_ap_memory(target, address, size,
2080                                                     count, buffer);
2081         }
2082         return retval;
2083 }
2084
2085 static int aarch64_write_phys_memory(struct target *target,
2086         target_addr_t address, uint32_t size,
2087         uint32_t count, const uint8_t *buffer)
2088 {
2089         struct armv8_common *armv8 = target_to_armv8(target);
2090         struct adiv5_dap *swjdp = armv8->arm.dap;
2091         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2092         uint8_t apsel = swjdp->apsel;
2093
2094         LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2095                 size, count);
2096
2097         if (count && buffer) {
2098
2099                 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2100
2101                         /* write memory through AHB-AP */
2102                         retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2103                 } else {
2104
2105                         /* write memory through APB-AP */
2106                         if (!armv8->is_armv7r) {
2107                                 retval = aarch64_mmu_modify(target, 0);
2108                                 if (retval != ERROR_OK)
2109                                         return retval;
2110                         }
2111                         return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2112                 }
2113         }
2114
2115
2116         /* REVISIT this op is generic ARMv7-A/R stuff */
2117         if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2118                 struct arm_dpm *dpm = armv8->arm.dpm;
2119
2120                 retval = dpm->prepare(dpm);
2121                 if (retval != ERROR_OK)
2122                         return retval;
2123
2124                 /* The Cache handling will NOT work with MMU active, the
2125                  * wrong addresses will be invalidated!
2126                  *
2127                  * For both ICache and DCache, walk all cache lines in the
2128                  * address range. A 64 byte cache line length is assumed here.
2129                  *
2130                  * REVISIT per ARMv7, these may trigger watchpoints ...
2131                  */
2132
2133                 /* invalidate I-Cache */
2134                 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2135                         /* ICIMVAU - Invalidate Cache single entry
2136                          * with MVA to PoU
2137                          *      MCR p15, 0, r0, c7, c5, 1
2138                          */
2139                         for (uint32_t cacheline = address;
2140                                 cacheline < address + size * count;
2141                                 cacheline += 64) {
2142                                 retval = dpm->instr_write_data_r0(dpm,
2143                                                 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2144                                                 cacheline);
2145                                 if (retval != ERROR_OK)
2146                                         return retval;
2147                         }
2148                 }
2149
2150                 /* invalidate D-Cache */
2151                 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2152                         /* DCIMVAC - Invalidate data Cache line
2153                          * with MVA to PoC
2154                          *      MCR p15, 0, r0, c7, c6, 1
2155                          */
2156                         for (uint32_t cacheline = address;
2157                                 cacheline < address + size * count;
2158                                 cacheline += 64) {
2159                                 retval = dpm->instr_write_data_r0(dpm,
2160                                                 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2161                                                 cacheline);
2162                                 if (retval != ERROR_OK)
2163                                         return retval;
2164                         }
2165                 }
2166
2167                 /* (void) */ dpm->finish(dpm);
2168         }
2169
2170         return retval;
2171 }
2172
2173 static int aarch64_write_memory(struct target *target, target_addr_t address,
2174         uint32_t size, uint32_t count, const uint8_t *buffer)
2175 {
2176         int mmu_enabled = 0;
2177         target_addr_t virt, phys;
2178         int retval;
2179         struct armv8_common *armv8 = target_to_armv8(target);
2180         struct adiv5_dap *swjdp = armv8->arm.dap;
2181         uint8_t apsel = swjdp->apsel;
2182
2183         /* aarch64 handles unaligned memory access */
2184         LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2185                   "; count %" PRId32, address, size, count);
2186
2187         /* determine if MMU was enabled on target stop */
2188         if (!armv8->is_armv7r) {
2189                 retval = aarch64_mmu(target, &mmu_enabled);
2190                 if (retval != ERROR_OK)
2191                         return retval;
2192         }
2193
2194         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2195                 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2196                           PRId32 "; count %" PRId32, address, size, count);
2197                 if (mmu_enabled) {
2198                         virt = address;
2199                         retval = aarch64_virt2phys(target, virt, &phys);
2200                         if (retval != ERROR_OK)
2201                                 return retval;
2202
2203                         LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2204                                   TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2205                         address = phys;
2206                 }
2207                 retval = aarch64_write_phys_memory(target, address, size,
2208                                 count, buffer);
2209         } else {
2210                 if (mmu_enabled) {
2211                         retval = aarch64_check_address(target, address);
2212                         if (retval != ERROR_OK)
2213                                 return retval;
2214                         /* enable MMU as we could have disabled it for phys access */
2215                         retval = aarch64_mmu_modify(target, 1);
2216                         if (retval != ERROR_OK)
2217                                 return retval;
2218                 }
2219                 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2220         }
2221         return retval;
2222 }
2223
2224 static int aarch64_handle_target_request(void *priv)
2225 {
2226         struct target *target = priv;
2227         struct armv8_common *armv8 = target_to_armv8(target);
2228         int retval;
2229
2230         if (!target_was_examined(target))
2231                 return ERROR_OK;
2232         if (!target->dbg_msg_enabled)
2233                 return ERROR_OK;
2234
2235         if (target->state == TARGET_RUNNING) {
2236                 uint32_t request;
2237                 uint32_t dscr;
2238                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2239                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2240
2241                 /* check if we have data */
2242                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2243                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2244                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2245                         if (retval == ERROR_OK) {
2246                                 target_request(target, request);
2247                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2248                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2249                         }
2250                 }
2251         }
2252
2253         return ERROR_OK;
2254 }
2255
2256 static int aarch64_examine_first(struct target *target)
2257 {
2258         struct aarch64_common *aarch64 = target_to_aarch64(target);
2259         struct armv8_common *armv8 = &aarch64->armv8_common;
2260         struct adiv5_dap *swjdp = armv8->arm.dap;
2261         int retval = ERROR_OK;
2262         uint32_t pfr, debug, ctypr, ttypr, cpuid;
2263         int i;
2264
2265         /* Initialize the debug port before searching for and
2266          * configuring the debug and memory APs below.
2267          */
2268         retval = dap_dp_init(swjdp);
2269         if (retval != ERROR_OK)
2270                 return retval;
2271
2272         /* Search for the APB-AP - it is needed for access to debug registers */
2273         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2274         if (retval != ERROR_OK) {
2275                 LOG_ERROR("Could not find APB-AP for debug access");
2276                 return retval;
2277         }
2278
2279         retval = mem_ap_init(armv8->debug_ap);
2280         if (retval != ERROR_OK) {
2281                 LOG_ERROR("Could not initialize the APB-AP");
2282                 return retval;
2283         }
2284
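        /* extra TCK clocks added after starting a memory access before its result is
         * sampled (assumption based on the adiv5 mem_ap API; 80 is a conservative value) */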
2285         armv8->debug_ap->memaccess_tck = 80;
2286
2287         /* Search for the AHB-AP */
2288         armv8->memory_ap_available = false;
2289         retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2290         if (retval == ERROR_OK) {
2291                 retval = mem_ap_init(armv8->memory_ap);
2292                 if (retval == ERROR_OK)
2293                         armv8->memory_ap_available = true;
2294         }
2295         if (retval != ERROR_OK) {
2296                 /* AHB-AP not found or unavailable - use the CPU */
2297                 LOG_DEBUG("No AHB-AP available for memory access");
2298         }
2299
2300
2301         if (!target->dbgbase_set) {
2302                 uint32_t dbgbase;
2303                 /* Get ROM Table base */
2304                 uint32_t apid;
2305                 int32_t coreidx = target->coreid;
2306                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2307                 if (retval != ERROR_OK)
2308                         return retval;
2309                 /* Lookup 0x15 -- Processor DAP */
2310                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2311                                 &armv8->debug_base, &coreidx);
2312                 if (retval != ERROR_OK)
2313                         return retval;
2314                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2315                           coreidx, armv8->debug_base);
2316         } else
2317                 armv8->debug_base = target->dbgbase;
2318
2319         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2320                         armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2321         if (retval != ERROR_OK) {
2322                 LOG_DEBUG("Examine %s failed", "oslock");
2323                 return retval;
2324         }
2325
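        /* dump a few raw debug registers purely for diagnostic logging */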
2326         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2327                         armv8->debug_base + 0x88, &cpuid);
2328         LOG_DEBUG("0x88 = %x", cpuid);
2329
2330         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2331                         armv8->debug_base + 0x314, &cpuid);
2332         LOG_DEBUG("0x314 = %x", cpuid);
2333
2334         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2335                         armv8->debug_base + 0x310, &cpuid);
2336         LOG_DEBUG("0x310 = %x", cpuid);
2337         if (retval != ERROR_OK)
2338                 return retval;
2339
2340         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2341                         armv8->debug_base + CPUDBG_CPUID, &cpuid);
2342         if (retval != ERROR_OK) {
2343                 LOG_DEBUG("Examine %s failed", "CPUID");
2344                 return retval;
2345         }
2346
2347         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2348                         armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2349         if (retval != ERROR_OK) {
2350                 LOG_DEBUG("Examine %s failed", "CTYPR");
2351                 return retval;
2352         }
2353
2354         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2355                         armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2356         if (retval != ERROR_OK) {
2357                 LOG_DEBUG("Examine %s failed", "TTYPR");
2358                 return retval;
2359         }
2360
2361         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2362                         armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2363         if (retval != ERROR_OK) {
2364                 LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2365                 return retval;
2366         }
2367         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2368                         armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2369         if (retval != ERROR_OK) {
2370                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2371                 return retval;
2372         }
2373
2374         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2375         LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2376         LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2377         LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2378         LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2379
2380         armv8->arm.core_type = ARM_MODE_MON;
2381         armv8->arm.core_state = ARM_STATE_AARCH64;
2382         retval = aarch64_dpm_setup(aarch64, debug);
2383         if (retval != ERROR_OK)
2384                 return retval;
2385
2386         /* Setup Breakpoint Register Pairs */
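        /* ID_AA64DFR0_EL1: BRPs in bits [15:12], CTX_CMPs in bits [31:28];
         * both fields encode the number of breakpoints minus one */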
2387         aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2388         aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2389
2393         aarch64->brp_num_available = aarch64->brp_num;
2394         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2395         for (i = 0; i < aarch64->brp_num; i++) {
2396                 aarch64->brp_list[i].used = 0;
2397                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2398                         aarch64->brp_list[i].type = BRP_NORMAL;
2399                 else
2400                         aarch64->brp_list[i].type = BRP_CONTEXT;
2401                 aarch64->brp_list[i].value = 0;
2402                 aarch64->brp_list[i].control = 0;
2403                 aarch64->brp_list[i].BRPn = i;
2404         }
2405
2406         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2407
2408         target_set_examined(target);
2409         return ERROR_OK;
2410 }
2411
2412 static int aarch64_examine(struct target *target)
2413 {
2414         int retval = ERROR_OK;
2415
2416         /* don't re-probe hardware after each reset */
2417         if (!target_was_examined(target))
2418                 retval = aarch64_examine_first(target);
2419
2420         /* Configure core debug access */
2421         if (retval == ERROR_OK)
2422                 retval = aarch64_init_debug_access(target);
2423
2424         return retval;
2425 }
2426
2427 /*
2428  *      AArch64 target creation and initialization
2429  */
2430
2431 static int aarch64_init_target(struct command_context *cmd_ctx,
2432         struct target *target)
2433 {
2434         /* examine_first() does a bunch of this */
2435         return ERROR_OK;
2436 }
2437
2438 static int aarch64_init_arch_info(struct target *target,
2439         struct aarch64_common *aarch64, struct jtag_tap *tap)
2440 {
2441         struct armv8_common *armv8 = &aarch64->armv8_common;
2442         struct adiv5_dap *dap = armv8->arm.dap;
2443
2444         armv8->arm.dap = dap;
2445
2446         /* Setup struct aarch64_common */
2447         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2448         /*  tap has no dap initialized */
2449         if (!tap->dap) {
2450                 tap->dap = dap_init();
2451
2452                 /* Leave (only) generic DAP stuff for debugport_init() */
2453                 tap->dap->tap = tap;
2454         }
2455
2456         armv8->arm.dap = tap->dap;
2457
2458         aarch64->fast_reg_read = 0;
2459
2460         /* register arch-specific functions */
2461         armv8->examine_debug_reason = NULL;
2462
2463         armv8->post_debug_entry = aarch64_post_debug_entry;
2464
2465         armv8->pre_restore_context = NULL;
2466
2467         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2468
2469         /* REVISIT v7a setup should be in a v7a-specific routine */
2470         armv8_init_arch_info(target, armv8);
2471         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2472
2473         return ERROR_OK;
2474 }
2475
2476 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2477 {
2478         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2479
2480         aarch64->armv8_common.is_armv7r = false;
2481
2482         return aarch64_init_arch_info(target, aarch64, target->tap);
2483 }
2484
2485 static int aarch64_mmu(struct target *target, int *enabled)
2486 {
2487         if (target->state != TARGET_HALTED) {
2488                 LOG_ERROR("%s: target not halted", __func__);
2489                 return ERROR_TARGET_INVALID;
2490         }
2491
2492         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2493         return ERROR_OK;
2494 }
2495
2496 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2497                              target_addr_t *phys)
2498 {
2499         int retval = ERROR_FAIL;
2500         struct armv8_common *armv8 = target_to_armv8(target);
2501         struct adiv5_dap *swjdp = armv8->arm.dap;
2502         uint8_t apsel = swjdp->apsel;
2503         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2504                 uint32_t ret;
2505                 retval = armv8_mmu_translate_va(target,
2506                                 virt, &ret);
2507                 if (retval != ERROR_OK)
2508                         goto done;
2509                 *phys = ret;
2510         } else {/*  use this method if armv8->memory_ap not selected
2511                  *  mmu must be enable in order to get a correct translation */
2512                 retval = aarch64_mmu_modify(target, 1);
2513                 if (retval != ERROR_OK)
2514                         goto done;
2515                 retval = armv8_mmu_translate_va_pa(target, virt,  phys, 1);
2516         }
2517 done:
2518         return retval;
2519 }
2520
2521 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2522 {
2523         struct target *target = get_current_target(CMD_CTX);
2524         struct armv8_common *armv8 = target_to_armv8(target);
2525
2526         return armv8_handle_cache_info_command(CMD_CTX,
2527                         &armv8->armv8_mmu.armv8_cache);
2528 }
2529
2530
2531 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2532 {
2533         struct target *target = get_current_target(CMD_CTX);
2534         if (!target_was_examined(target)) {
2535                 LOG_ERROR("target not examined yet");
2536                 return ERROR_FAIL;
2537         }
2538
2539         return aarch64_init_debug_access(target);
2540 }
2541 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2542 {
2543         struct target *target = get_current_target(CMD_CTX);
2544         /* check target is an smp target */
2545         struct target_list *head;
2546         struct target *curr;
2547         head = target->head;
2548         target->smp = 0;
2549         if (head != (struct target_list *)NULL) {
2550                 while (head != (struct target_list *)NULL) {
2551                         curr = head->target;
2552                         curr->smp = 0;
2553                         head = head->next;
2554                 }
2555                 /*  fixes the target display to the debugger */
2556                 target->gdb_service->target = target;
2557         }
2558         return ERROR_OK;
2559 }
2560
2561 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2562 {
2563         struct target *target = get_current_target(CMD_CTX);
2564         struct target_list *head;
2565         struct target *curr;
2566         head = target->head;
2567         if (head != (struct target_list *)NULL) {
2568                 target->smp = 1;
2569                 while (head != (struct target_list *)NULL) {
2570                         curr = head->target;
2571                         curr->smp = 1;
2572                         head = head->next;
2573                 }
2574         }
2575         return ERROR_OK;
2576 }
2577
2578 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2579 {
2580         struct target *target = get_current_target(CMD_CTX);
2581         int retval = ERROR_OK;
2582         struct target_list *head;
2583         head = target->head;
2584         if (head != (struct target_list *)NULL) {
2585                 if (CMD_ARGC == 1) {
2586                         int coreid = 0;
2587                         COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2588                         if (ERROR_OK != retval)
2589                                 return retval;
2590                         target->gdb_service->core[1] = coreid;
2591
2592                 }
2593                 command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2594                         , target->gdb_service->core[1]);
2595         }
2596         return ERROR_OK;
2597 }
2598
2599 static const struct command_registration aarch64_exec_command_handlers[] = {
2600         {
2601                 .name = "cache_info",
2602                 .handler = aarch64_handle_cache_info_command,
2603                 .mode = COMMAND_EXEC,
2604                 .help = "display information about target caches",
2605                 .usage = "",
2606         },
2607         {
2608                 .name = "dbginit",
2609                 .handler = aarch64_handle_dbginit_command,
2610                 .mode = COMMAND_EXEC,
2611                 .help = "Initialize core debug",
2612                 .usage = "",
2613         },
2614         {       .name = "smp_off",
2615                 .handler = aarch64_handle_smp_off_command,
2616                 .mode = COMMAND_EXEC,
2617                 .help = "Stop smp handling",
2618                 .usage = "",
2619         },
2620         {
2621                 .name = "smp_on",
2622                 .handler = aarch64_handle_smp_on_command,
2623                 .mode = COMMAND_EXEC,
2624                 .help = "Restart smp handling",
2625                 .usage = "",
2626         },
2627         {
2628                 .name = "smp_gdb",
2629                 .handler = aarch64_handle_smp_gdb_command,
2630                 .mode = COMMAND_EXEC,
2631                 .help = "display or set the current core reported to gdb",
2632                 .usage = "",
2633         },
2634
2635
2636         COMMAND_REGISTRATION_DONE
2637 };
2638 static const struct command_registration aarch64_command_handlers[] = {
2639         {
2640                 .chain = arm_command_handlers,
2641         },
2642         {
2643                 .chain = armv8_command_handlers,
2644         },
2645         {
2646                 .name = "aarch64",
2647                 .mode = COMMAND_ANY,
2648                 .help = "Aarch64 command group",
2649                 .usage = "",
2650                 .chain = aarch64_exec_command_handlers,
2651         },
2652         COMMAND_REGISTRATION_DONE
2653 };
2654
2655 struct target_type aarch64_target = {
2656         .name = "aarch64",
2657
2658         .poll = aarch64_poll,
2659         .arch_state = armv8_arch_state,
2660
2661         .halt = aarch64_halt,
2662         .resume = aarch64_resume,
2663         .step = aarch64_step,
2664
2665         .assert_reset = aarch64_assert_reset,
2666         .deassert_reset = aarch64_deassert_reset,
2667
2668         /* REVISIT allow exporting VFP3 registers ... */
2669         .get_gdb_reg_list = armv8_get_gdb_reg_list,
2670
2671         .read_memory = aarch64_read_memory,
2672         .write_memory = aarch64_write_memory,
2673
2674         .checksum_memory = arm_checksum_memory,
2675         .blank_check_memory = arm_blank_check_memory,
2676
2677         .run_algorithm = armv4_5_run_algorithm,
2678
2679         .add_breakpoint = aarch64_add_breakpoint,
2680         .add_context_breakpoint = aarch64_add_context_breakpoint,
2681         .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2682         .remove_breakpoint = aarch64_remove_breakpoint,
2683         .add_watchpoint = NULL,
2684         .remove_watchpoint = NULL,
2685
2686         .commands = aarch64_command_handlers,
2687         .target_create = aarch64_target_create,
2688         .init_target = aarch64_init_target,
2689         .examine = aarch64_examine,
2690
2691         .read_phys_memory = aarch64_read_phys_memory,
2692         .write_phys_memory = aarch64_write_phys_memory,
2693         .mmu = aarch64_mmu,
2694         .virt2phys = aarch64_virt2phys,
2695 };