aarch64: fix debug entry from EL0
[fw/openocd] / src / target / aarch64.c
1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 enum restart_mode {
34         RESTART_LAZY,
35         RESTART_SYNC,
36 };
37
38 enum halt_mode {
39         HALT_LAZY,
40         HALT_SYNC,
41 };
42
43 static int aarch64_poll(struct target *target);
44 static int aarch64_debug_entry(struct target *target);
45 static int aarch64_restore_context(struct target *target, bool bpwp);
46 static int aarch64_set_breakpoint(struct target *target,
47         struct breakpoint *breakpoint, uint8_t matchmode);
48 static int aarch64_set_context_breakpoint(struct target *target,
49         struct breakpoint *breakpoint, uint8_t matchmode);
50 static int aarch64_set_hybrid_breakpoint(struct target *target,
51         struct breakpoint *breakpoint);
52 static int aarch64_unset_breakpoint(struct target *target,
53         struct breakpoint *breakpoint);
54 static int aarch64_mmu(struct target *target, int *enabled);
55 static int aarch64_virt2phys(struct target *target,
56         target_addr_t virt, target_addr_t *phys);
57 static int aarch64_read_cpu_memory(struct target *target,
58         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
59
60 #define foreach_smp_target(pos, head) \
61         for (pos = head; (pos != NULL); pos = pos->next)
62
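/*
 * Write the saved SCTLR value back to the PE if the debugger changed it while
 * the target was halted (e.g. via aarch64_mmu_modify() below). The MSR/MCR
 * instruction is chosen according to the exception level the core halted in;
 * a core halted in EL0 is temporarily switched to EL1H for the access.
 */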
63 static int aarch64_restore_system_control_reg(struct target *target)
64 {
65         enum arm_mode target_mode = ARM_MODE_ANY;
66         int retval = ERROR_OK;
67         uint32_t instr;
68
69         struct aarch64_common *aarch64 = target_to_aarch64(target);
70         struct armv8_common *armv8 = target_to_armv8(target);
71
72         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
73                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
74                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
75
76                 switch (armv8->arm.core_mode) {
77                 case ARMV8_64_EL0T:
78                         target_mode = ARMV8_64_EL1H;
79                         /* fall through */
80                 case ARMV8_64_EL1T:
81                 case ARMV8_64_EL1H:
82                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
83                         break;
84                 case ARMV8_64_EL2T:
85                 case ARMV8_64_EL2H:
86                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
87                         break;
88                 case ARMV8_64_EL3H:
89                 case ARMV8_64_EL3T:
90                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
91                         break;
92
93                 case ARM_MODE_SVC:
94                 case ARM_MODE_ABT:
95                 case ARM_MODE_FIQ:
96                 case ARM_MODE_IRQ:
97                         instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
98                         break;
99
100                 default:
101                         LOG_INFO("cannot read system control register in this mode");
102                         return ERROR_FAIL;
103                 }
104
105                 if (target_mode != ARM_MODE_ANY)
106                         armv8_dpm_modeswitch(&armv8->dpm, target_mode);
107
108                 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
109                 if (retval != ERROR_OK)
110                         return retval;
111
112                 if (target_mode != ARM_MODE_ANY)
113                         armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
114         }
115
116         return retval;
117 }
118
119 /*  modify system_control_reg in order to enable or disable the MMU for:
120  *  - virt2phys address conversion
121  *  - reading or writing memory at physical or virtual addresses */
122 static int aarch64_mmu_modify(struct target *target, int enable)
123 {
124         struct aarch64_common *aarch64 = target_to_aarch64(target);
125         struct armv8_common *armv8 = &aarch64->armv8_common;
126         int retval = ERROR_OK;
127         uint32_t instr = 0;
128
129         if (enable) {
130                 /* the MMU can only be enabled if it was enabled when the target stopped */
131                 if (!(aarch64->system_control_reg & 0x1U)) {
132                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
133                         return ERROR_FAIL;
134                 }
135                 if (!(aarch64->system_control_reg_curr & 0x1U))
136                         aarch64->system_control_reg_curr |= 0x1U;
137         } else {
138                 if (aarch64->system_control_reg_curr & 0x4U) {
139                         /*  data cache is active */
140                         aarch64->system_control_reg_curr &= ~0x4U;
141                         /* flush data cache armv8 function to be called */
142                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
143                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
144                 }
145                 if ((aarch64->system_control_reg_curr & 0x1U)) {
146                         aarch64->system_control_reg_curr &= ~0x1U;
147                 }
148         }
149
150         switch (armv8->arm.core_mode) {
151         case ARMV8_64_EL0T:
152         case ARMV8_64_EL1T:
153         case ARMV8_64_EL1H:
154                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
155                 break;
156         case ARMV8_64_EL2T:
157         case ARMV8_64_EL2H:
158                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
159                 break;
160         case ARMV8_64_EL3H:
161         case ARMV8_64_EL3T:
162                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
163                 break;
164
165         case ARM_MODE_SVC:
166         case ARM_MODE_ABT:
167         case ARM_MODE_FIQ:
168         case ARM_MODE_IRQ:
169                 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
170                 break;
171
172         default:
173                 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
174                 break;
175         }
176
177         retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
178                                 aarch64->system_control_reg_curr);
179         return retval;
180 }
181
182 /*
183  * Basic debug access, very low level assumes state is saved
184  */
185 static int aarch64_init_debug_access(struct target *target)
186 {
187         struct armv8_common *armv8 = target_to_armv8(target);
188         int retval;
189         uint32_t dummy;
190
191         LOG_DEBUG("%s", target_name(target));
192
193         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
194                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
195         if (retval != ERROR_OK) {
196                 LOG_DEBUG("Examine %s failed", "oslock");
197                 return retval;
198         }
199
200         /* Clear Sticky Power Down status Bit in PRSR to enable access to
201            the registers in the Core Power Domain */
202         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
203                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
204         if (retval != ERROR_OK)
205                 return retval;
206
207         /*
208          * Static CTI configuration:
209          * Channel 0 -> trigger outputs HALT request to PE
210          * Channel 1 -> trigger outputs Resume request to PE
211          * Gate all channel trigger events from entering the CTM
212          */
213
214         /* Enable CTI */
215         retval = arm_cti_enable(armv8->cti, true);
216         /* By default, gate all channel events to and from the CTM */
217         if (retval == ERROR_OK)
218                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
219         /* output halt requests to PE on channel 0 event */
220         if (retval == ERROR_OK)
221                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
222         /* output restart requests to PE on channel 1 event */
223         if (retval == ERROR_OK)
224                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
225         if (retval != ERROR_OK)
226                 return retval;
227
228         /* Resync breakpoint registers */
229
230         return ERROR_OK;
231 }
232
233 /* Write to memory mapped registers directly with no cache or mmu handling */
234 static int aarch64_dap_write_memap_register_u32(struct target *target,
235         uint32_t address,
236         uint32_t value)
237 {
238         int retval;
239         struct armv8_common *armv8 = target_to_armv8(target);
240
241         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
242
243         return retval;
244 }
245
246 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
247 {
248         struct arm_dpm *dpm = &a8->armv8_common.dpm;
249         int retval;
250
251         dpm->arm = &a8->armv8_common.arm;
252         dpm->didr = debug;
253
254         retval = armv8_dpm_setup(dpm);
255         if (retval == ERROR_OK)
256                 retval = armv8_dpm_initialize(dpm);
257
258         return retval;
259 }
260
261 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
262 {
263         struct armv8_common *armv8 = target_to_armv8(target);
264         return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
265 }
266
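/*
 * Read EDPRSR and compare the bits selected by 'mask' against 'val'. The
 * comparison result is returned via p_result and the raw PRSR value, if
 * requested, via p_prsr.
 */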
267 static int aarch64_check_state_one(struct target *target,
268                 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
269 {
270         struct armv8_common *armv8 = target_to_armv8(target);
271         uint32_t prsr;
272         int retval;
273
274         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
275                         armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
276         if (retval != ERROR_OK)
277                 return retval;
278
279         if (p_prsr)
280                 *p_prsr = prsr;
281
282         if (p_result)
283                 *p_result = (prsr & mask) == (val & mask);
284
285         return ERROR_OK;
286 }
287
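/* Poll PRSR until the PE reports the halted state, or give up after one second. */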
288 static int aarch64_wait_halt_one(struct target *target)
289 {
290         int retval = ERROR_OK;
291         uint32_t prsr;
292
293         int64_t then = timeval_ms();
294         for (;;) {
295                 int halted;
296
297                 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
298                 if (retval != ERROR_OK || halted)
299                         break;
300
301                 if (timeval_ms() > then + 1000) {
302                         retval = ERROR_TARGET_TIMEOUT;
303                         LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
304                         break;
305                 }
306         }
307         return retval;
308 }
309
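/*
 * Prepare every running PE of the SMP group for halting: enable halting debug
 * mode and open the CTI gate for channel 0 so that a single halt event is
 * distributed through the CTM. p_first receives the first prepared sibling
 * when the calling target is excluded, otherwise the calling target itself.
 */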
310 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
311 {
312         int retval = ERROR_OK;
313         struct target_list *head = target->head;
314         struct target *first = NULL;
315
316         LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
317
318         while (head != NULL) {
319                 struct target *curr = head->target;
320                 struct armv8_common *armv8 = target_to_armv8(curr);
321                 head = head->next;
322
323                 if (exc_target && curr == target)
324                         continue;
325                 if (!target_was_examined(curr))
326                         continue;
327                 if (curr->state != TARGET_RUNNING)
328                         continue;
329
330                 /* HACK: mark this target as prepared for halting */
331                 curr->debug_reason = DBG_REASON_DBGRQ;
332
333                 /* open the gate for channel 0 to let HALT requests pass to the CTM */
334                 retval = arm_cti_ungate_channel(armv8->cti, 0);
335                 if (retval == ERROR_OK)
336                         retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
337                 if (retval != ERROR_OK)
338                         break;
339
340                 LOG_DEBUG("target %s prepared", target_name(curr));
341
342                 if (first == NULL)
343                         first = curr;
344         }
345
346         if (p_first) {
347                 if (exc_target && first)
348                         *p_first = first;
349                 else
350                         *p_first = target;
351         }
352
353         return retval;
354 }
355
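/*
 * Halt a single PE: enable halting debug mode (DSCR.HDE) and pulse CTI
 * channel 0 to issue the halt request. With HALT_SYNC, also wait for the PE
 * to enter debug state.
 */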
356 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
357 {
358         int retval = ERROR_OK;
359         struct armv8_common *armv8 = target_to_armv8(target);
360
361         LOG_DEBUG("%s", target_name(target));
362
363         /* allow Halting Debug Mode */
364         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
365         if (retval != ERROR_OK)
366                 return retval;
367
368         /* trigger an event on channel 0, this outputs a halt request to the PE */
369         retval = arm_cti_pulse_channel(armv8->cti, 0);
370         if (retval != ERROR_OK)
371                 return retval;
372
373         if (mode == HALT_SYNC) {
374                 retval = aarch64_wait_halt_one(target);
375                 if (retval != ERROR_OK) {
376                         if (retval == ERROR_TARGET_TIMEOUT)
377                                 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
378                         return retval;
379                 }
380         }
381
382         return ERROR_OK;
383 }
384
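/*
 * Halt all PEs of an SMP group. The group is prepared first so the halt event
 * propagates through the trigger matrix; the one second timeout covers SoCs
 * whose CTIs are not cross-connected (see the Hi6220 workaround below).
 */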
385 static int aarch64_halt_smp(struct target *target, bool exc_target)
386 {
387         struct target *next = target;
388         int retval;
389
390         /* prepare halt on all PEs of the group */
391         retval = aarch64_prepare_halt_smp(target, exc_target, &next);
392
393         if (exc_target && next == target)
394                 return retval;
395
396         /* halt the target PE */
397         if (retval == ERROR_OK)
398                 retval = aarch64_halt_one(next, HALT_LAZY);
399
400         if (retval != ERROR_OK)
401                 return retval;
402
403         /* wait for all PEs to halt */
404         int64_t then = timeval_ms();
405         for (;;) {
406                 bool all_halted = true;
407                 struct target_list *head;
408                 struct target *curr;
409
410                 foreach_smp_target(head, target->head) {
411                         int halted;
412
413                         curr = head->target;
414
415                         if (!target_was_examined(curr))
416                                 continue;
417
418                         retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
419                         if (retval != ERROR_OK || !halted) {
420                                 all_halted = false;
421                                 break;
422                         }
423                 }
424
425                 if (all_halted)
426                         break;
427
428                 if (timeval_ms() > then + 1000) {
429                         retval = ERROR_TARGET_TIMEOUT;
430                         break;
431                 }
432
433                 /*
434                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
435                  * and it looks like the CTI's are not connected by a common
436                  * trigger matrix. It seems that we need to halt one core in each
437                  * cluster explicitly. So if we find that a core has not halted
438                  * yet, we trigger an explicit halt for the second cluster.
439                  */
440                 retval = aarch64_halt_one(curr, HALT_LAZY);
441                 if (retval != ERROR_OK)
442                         break;
443         }
444
445         return retval;
446 }
447
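/*
 * After a halt event, bring the remaining targets of the SMP group into a
 * consistent halted state and poll them so their register caches are updated;
 * the target serving GDB is polled last.
 */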
448 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
449 {
450         struct target *gdb_target = NULL;
451         struct target_list *head;
452         struct target *curr;
453
454         if (debug_reason == DBG_REASON_NOTHALTED) {
455                 LOG_DEBUG("Halting remaining targets in SMP group");
456                 aarch64_halt_smp(target, true);
457         }
458
459         /* poll all targets in the group, but skip the target that serves GDB */
460         foreach_smp_target(head, target->head) {
461                 curr = head->target;
462                 /* skip calling context */
463                 if (curr == target)
464                         continue;
465                 if (!target_was_examined(curr))
466                         continue;
467                 /* skip targets that were already halted */
468                 if (curr->state == TARGET_HALTED)
469                         continue;
470                 /* remember the gdb_service->target */
471                 if (curr->gdb_service != NULL)
472                         gdb_target = curr->gdb_service->target;
473                 /* skip it */
474                 if (curr == gdb_target)
475                         continue;
476
477                 /* avoid recursion in aarch64_poll() */
478                 curr->smp = 0;
479                 aarch64_poll(curr);
480                 curr->smp = 1;
481         }
482
483         /* after all targets were updated, poll the gdb serving target */
484         if (gdb_target != NULL && gdb_target != target)
485                 aarch64_poll(gdb_target);
486
487         return ERROR_OK;
488 }
489
490 /*
491  * Aarch64 Run control
492  */
493
494 static int aarch64_poll(struct target *target)
495 {
496         enum target_state prev_target_state;
497         int retval = ERROR_OK;
498         int halted;
499
500         retval = aarch64_check_state_one(target,
501                                 PRSR_HALT, PRSR_HALT, &halted, NULL);
502         if (retval != ERROR_OK)
503                 return retval;
504
505         if (halted) {
506                 prev_target_state = target->state;
507                 if (prev_target_state != TARGET_HALTED) {
508                         enum target_debug_reason debug_reason = target->debug_reason;
509
510                         /* We have a halting debug event */
511                         target->state = TARGET_HALTED;
512                         LOG_DEBUG("Target %s halted", target_name(target));
513                         retval = aarch64_debug_entry(target);
514                         if (retval != ERROR_OK)
515                                 return retval;
516
517                         if (target->smp)
518                                 update_halt_gdb(target, debug_reason);
519
520                         switch (prev_target_state) {
521                         case TARGET_RUNNING:
522                         case TARGET_UNKNOWN:
523                         case TARGET_RESET:
524                                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
525                                 break;
526                         case TARGET_DEBUG_RUNNING:
527                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
528                                 break;
529                         default:
530                                 break;
531                         }
532                 }
533         } else
534                 target->state = TARGET_RUNNING;
535
536         return retval;
537 }
538
539 static int aarch64_halt(struct target *target)
540 {
541         if (target->smp)
542                 return aarch64_halt_smp(target, false);
543
544         return aarch64_halt_one(target, HALT_SYNC);
545 }
546
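/*
 * Restore a single target before restart: work out the resume PC, write back
 * a possibly modified system control register and flush all dirty registers
 * to the PE.
 */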
547 static int aarch64_restore_one(struct target *target, int current,
548         uint64_t *address, int handle_breakpoints, int debug_execution)
549 {
550         struct armv8_common *armv8 = target_to_armv8(target);
551         struct arm *arm = &armv8->arm;
552         int retval;
553         uint64_t resume_pc;
554
555         LOG_DEBUG("%s", target_name(target));
556
557         if (!debug_execution)
558                 target_free_all_working_areas(target);
559
560         /* current = 1: continue on current pc, otherwise continue at <address> */
561         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
562         if (!current)
563                 resume_pc = *address;
564         else
565                 *address = resume_pc;
566
567         /* Make sure that the ARMv7 GDB Thumb fixups do not
568          * kill the return address
569          */
570         switch (arm->core_state) {
571                 case ARM_STATE_ARM:
572                         resume_pc &= 0xFFFFFFFC;
573                         break;
574                 case ARM_STATE_AARCH64:
575                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
576                         break;
577                 case ARM_STATE_THUMB:
578                 case ARM_STATE_THUMB_EE:
579                         /* When the return address is loaded into PC
580                          * bit 0 must be 1 to stay in Thumb state
581                          */
582                         resume_pc |= 0x1;
583                         break;
584                 case ARM_STATE_JAZELLE:
585                         LOG_ERROR("How do I resume into Jazelle state??");
586                         return ERROR_FAIL;
587         }
588         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
589         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
590         arm->pc->dirty = 1;
591         arm->pc->valid = 1;
592
593         /* call this before restoring the context because it uses CPU
594          * register r0 to restore the system control register */
595         retval = aarch64_restore_system_control_reg(target);
596         if (retval == ERROR_OK)
597                 retval = aarch64_restore_context(target, handle_breakpoints);
598
599         return retval;
600 }
601
602 /**
603  * prepare single target for restart: acknowledge any pending CTI halt event,
604  * open the CTI restart gate (channel 1) and close the halt gate (channel 0)
605  * so the PE can leave debug state cleanly
606  */
607 static int aarch64_prepare_restart_one(struct target *target)
608 {
609         struct armv8_common *armv8 = target_to_armv8(target);
610         int retval;
611         uint32_t dscr;
612         uint32_t tmp;
613
614         LOG_DEBUG("%s", target_name(target));
615
616         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
617                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
618         if (retval != ERROR_OK)
619                 return retval;
620
621         if ((dscr & DSCR_ITE) == 0)
622                 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
623         if ((dscr & DSCR_ERR) != 0)
624                 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
625
626         /* acknowledge a pending CTI halt event */
627         retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
628         /*
629          * open the CTI gate for channel 1 so that the restart events
630          * get passed along to all PEs. Also close gate for channel 0
631          * to isolate the PE from halt events.
632          */
633         if (retval == ERROR_OK)
634                 retval = arm_cti_ungate_channel(armv8->cti, 1);
635         if (retval == ERROR_OK)
636                 retval = arm_cti_gate_channel(armv8->cti, 0);
637
638         /* make sure that DSCR.HDE is set */
639         if (retval == ERROR_OK) {
640                 dscr |= DSCR_HDE;
641                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
642                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
643         }
644
645         if (retval == ERROR_OK) {
646                 /* clear sticky bits in PRSR, SDR is now 0 */
647                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
648                                 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
649         }
650
651         return retval;
652 }
653
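/*
 * Issue the actual restart by pulsing CTI channel 1. With RESTART_SYNC, wait
 * for PRSR.SDR to confirm that the PE really left debug state.
 */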
654 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
655 {
656         struct armv8_common *armv8 = target_to_armv8(target);
657         int retval;
658
659         LOG_DEBUG("%s", target_name(target));
660
661         /* trigger an event on channel 1, generates a restart request to the PE */
662         retval = arm_cti_pulse_channel(armv8->cti, 1);
663         if (retval != ERROR_OK)
664                 return retval;
665
666         if (mode == RESTART_SYNC) {
667                 int64_t then = timeval_ms();
668                 for (;;) {
669                         int resumed;
670                         /*
671                          * if PRSR.SDR is set now, the target did restart, even
672                          * if it's now already halted again (e.g. due to breakpoint)
673                          */
674                         retval = aarch64_check_state_one(target,
675                                                 PRSR_SDR, PRSR_SDR, &resumed, NULL);
676                         if (retval != ERROR_OK || resumed)
677                                 break;
678
679                         if (timeval_ms() > then + 1000) {
680                                 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
681                                 retval = ERROR_TARGET_TIMEOUT;
682                                 break;
683                         }
684                 }
685         }
686
687         if (retval != ERROR_OK)
688                 return retval;
689
690         target->debug_reason = DBG_REASON_NOTHALTED;
691         target->state = TARGET_RUNNING;
692
693         return ERROR_OK;
694 }
695
696 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
697 {
698         int retval;
699
700         LOG_DEBUG("%s", target_name(target));
701
702         retval = aarch64_prepare_restart_one(target);
703         if (retval == ERROR_OK)
704                 retval = aarch64_do_restart_one(target, mode);
705
706         return retval;
707 }
708
709 /*
710  * prepare all but the current target for restart
711  */
712 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
713 {
714         int retval = ERROR_OK;
715         struct target_list *head;
716         struct target *first = NULL;
717         uint64_t address;
718
719         foreach_smp_target(head, target->head) {
720                 struct target *curr = head->target;
721
722                 /* skip calling target */
723                 if (curr == target)
724                         continue;
725                 if (!target_was_examined(curr))
726                         continue;
727                 if (curr->state != TARGET_HALTED)
728                         continue;
729
730                 /*  resume at current address, not in step mode */
731                 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
732                 if (retval == ERROR_OK)
733                         retval = aarch64_prepare_restart_one(curr);
734                 if (retval != ERROR_OK) {
735                         LOG_ERROR("failed to restore target %s", target_name(curr));
736                         break;
737                 }
738                 /* remember the first valid target in the group */
739                 if (first == NULL)
740                         first = curr;
741         }
742
743         if (p_first)
744                 *p_first = first;
745
746         return retval;
747 }
748
749
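/*
 * Restart all other members of the SMP group while one target is being
 * single-stepped, then wait until each of them reports PRSR.SDR.
 */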
750 static int aarch64_step_restart_smp(struct target *target)
751 {
752         int retval = ERROR_OK;
753         struct target_list *head;
754         struct target *first = NULL;
755
756         LOG_DEBUG("%s", target_name(target));
757
758         retval = aarch64_prep_restart_smp(target, 0, &first);
759         if (retval != ERROR_OK)
760                 return retval;
761
762         if (first != NULL)
763                 retval = aarch64_do_restart_one(first, RESTART_LAZY);
764         if (retval != ERROR_OK) {
765                 LOG_DEBUG("error restarting target %s", target_name(first));
766                 return retval;
767         }
768
769         int64_t then = timeval_ms();
770         for (;;) {
771                 struct target *curr = target;
772                 bool all_resumed = true;
773
774                 foreach_smp_target(head, target->head) {
775                         uint32_t prsr;
776                         int resumed;
777
778                         curr = head->target;
779
780                         if (curr == target)
781                                 continue;
782
783                         if (!target_was_examined(curr))
784                                 continue;
785
786                         retval = aarch64_check_state_one(curr,
787                                         PRSR_SDR, PRSR_SDR, &resumed, &prsr);
788                         if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
789                                 all_resumed = false;
790                                 break;
791                         }
792
793                         if (curr->state != TARGET_RUNNING) {
794                                 curr->state = TARGET_RUNNING;
795                                 curr->debug_reason = DBG_REASON_NOTHALTED;
796                                 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
797                         }
798                 }
799
800                 if (all_resumed)
801                         break;
802
803                 if (timeval_ms() > then + 1000) {
804                         LOG_ERROR("%s: timeout waiting for target resume", __func__);
805                         retval = ERROR_TARGET_TIMEOUT;
806                         break;
807                 }
808                 /*
809                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
810                  * and it looks like the CTI's are not connected by a common
811                  * trigger matrix. It seems that we need to restart one core in each
812                  * cluster explicitly. So if we find that a core has not resumed
813                  * yet, we trigger an explicit resume for the second cluster.
814                  */
815                 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
816                 if (retval != ERROR_OK)
817                         break;
818         }
819
820         return retval;
821 }
822
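/*
 * Resume execution. For SMP groups the sibling targets are prepared first so
 * that the restart event on CTI channel 1 propagates through the trigger
 * matrix and the whole group leaves debug state together.
 */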
823 static int aarch64_resume(struct target *target, int current,
824         target_addr_t address, int handle_breakpoints, int debug_execution)
825 {
826         int retval = 0;
827         uint64_t addr = address;
828
829         if (target->state != TARGET_HALTED)
830                 return ERROR_TARGET_NOT_HALTED;
831
832         /*
833          * If this target is part of an SMP group, prepare the other
834          * targets for resuming. This involves restoring the complete
835          * target register context and setting up CTI gates to accept
836          * resume events from the trigger matrix.
837          */
838         if (target->smp) {
839                 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
840                 if (retval != ERROR_OK)
841                         return retval;
842         }
843
844         /* all targets prepared, restore and restart the current target */
845         retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
846                                  debug_execution);
847         if (retval == ERROR_OK)
848                 retval = aarch64_restart_one(target, RESTART_SYNC);
849         if (retval != ERROR_OK)
850                 return retval;
851
852         if (target->smp) {
853                 int64_t then = timeval_ms();
854                 for (;;) {
855                         struct target *curr = target;
856                         struct target_list *head;
857                         bool all_resumed = true;
858
859                         foreach_smp_target(head, target->head) {
860                                 uint32_t prsr;
861                                 int resumed;
862
863                                 curr = head->target;
864                                 if (curr == target)
865                                         continue;
866                                 if (!target_was_examined(curr))
867                                         continue;
868
869                                 retval = aarch64_check_state_one(curr,
870                                                 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
871                                 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
872                                         all_resumed = false;
873                                         break;
874                                 }
875
876                                 if (curr->state != TARGET_RUNNING) {
877                                         curr->state = TARGET_RUNNING;
878                                         curr->debug_reason = DBG_REASON_NOTHALTED;
879                                         target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
880                                 }
881                         }
882
883                         if (all_resumed)
884                                 break;
885
886                         if (timeval_ms() > then + 1000) {
887                                 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
888                                 retval = ERROR_TARGET_TIMEOUT;
889                                 break;
890                         }
891
892                         /*
893                          * HACK: on Hi6220 there are 8 cores organized in 2 clusters
894                          * and it looks like the CTI's are not connected by a common
895                          * trigger matrix. It seems that we need to restart one core in each
896                          * cluster explicitly. So if we find that a core has not resumed
897                          * yet, we trigger an explicit resume for the second cluster.
898                          */
899                         retval = aarch64_do_restart_one(curr, RESTART_LAZY);
900                         if (retval != ERROR_OK)
901                                 break;
902                 }
903         }
904
905         if (retval != ERROR_OK)
906                 return retval;
907
908         target->debug_reason = DBG_REASON_NOTHALTED;
909
910         if (!debug_execution) {
911                 target->state = TARGET_RUNNING;
912                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
913                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
914         } else {
915                 target->state = TARGET_DEBUG_RUNNING;
916                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
917                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
918         }
919
920         return ERROR_OK;
921 }
922
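/*
 * Debug state entry: clear sticky errors, acknowledge the CTI halt event,
 * derive the AArch32/AArch64 state from DSCR, report the halt reason and read
 * back the current register set.
 */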
923 static int aarch64_debug_entry(struct target *target)
924 {
925         int retval = ERROR_OK;
926         struct armv8_common *armv8 = target_to_armv8(target);
927         struct arm_dpm *dpm = &armv8->dpm;
928         enum arm_state core_state;
929         uint32_t dscr;
930
931         /* make sure to clear all sticky errors */
932         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
933                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
934         if (retval == ERROR_OK)
935                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
936                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
937         if (retval == ERROR_OK)
938                 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
939
940         if (retval != ERROR_OK)
941                 return retval;
942
943         LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
944
945         dpm->dscr = dscr;
946         core_state = armv8_dpm_get_core_state(dpm);
947         armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
948         armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
949
950         /* close the CTI gate for all events */
951         if (retval == ERROR_OK)
952                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
953         /* discard async exceptions */
954         if (retval == ERROR_OK)
955                 retval = dpm->instr_cpsr_sync(dpm);
956         if (retval != ERROR_OK)
957                 return retval;
958
959         /* Examine debug reason */
960         armv8_dpm_report_dscr(dpm, dscr);
961
962         /* save address of instruction that triggered the watchpoint? */
963         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
964                 uint32_t tmp;
965                 uint64_t wfar = 0;
966
967                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
968                                 armv8->debug_base + CPUV8_DBG_WFAR1,
969                                 &tmp);
970                 if (retval != ERROR_OK)
971                         return retval;
972                 wfar = tmp;
973                 wfar = (wfar << 32);
974                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
975                                 armv8->debug_base + CPUV8_DBG_WFAR0,
976                                 &tmp);
977                 if (retval != ERROR_OK)
978                         return retval;
979                 wfar |= tmp;
980                 armv8_dpm_report_wfar(&armv8->dpm, wfar);
981         }
982
983         retval = armv8_dpm_read_current_registers(&armv8->dpm);
984
985         if (retval == ERROR_OK && armv8->post_debug_entry)
986                 retval = armv8->post_debug_entry(target);
987
988         return retval;
989 }
990
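/*
 * Read and cache the SCTLR of the exception level the core halted in, then
 * derive the MMU and cache enable state from it. A core that halted in EL0 is
 * temporarily switched to EL1H because SCTLR_EL1 is not accessible from EL0.
 */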
991 static int aarch64_post_debug_entry(struct target *target)
992 {
993         struct aarch64_common *aarch64 = target_to_aarch64(target);
994         struct armv8_common *armv8 = &aarch64->armv8_common;
995         int retval;
996         enum arm_mode target_mode = ARM_MODE_ANY;
997         uint32_t instr;
998
999         switch (armv8->arm.core_mode) {
1000         case ARMV8_64_EL0T:
1001                 target_mode = ARMV8_64_EL1H;
1002                 /* fall through */
1003         case ARMV8_64_EL1T:
1004         case ARMV8_64_EL1H:
1005                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1006                 break;
1007         case ARMV8_64_EL2T:
1008         case ARMV8_64_EL2H:
1009                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1010                 break;
1011         case ARMV8_64_EL3H:
1012         case ARMV8_64_EL3T:
1013                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1014                 break;
1015
1016         case ARM_MODE_SVC:
1017         case ARM_MODE_ABT:
1018         case ARM_MODE_FIQ:
1019         case ARM_MODE_IRQ:
1020                 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1021                 break;
1022
1023         default:
1024                 LOG_INFO("cannot read system control register in this mode");
1025                 return ERROR_FAIL;
1026         }
1027
1028         if (target_mode != ARM_MODE_ANY)
1029                 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1030
1031         retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1032         if (retval != ERROR_OK)
1033                 return retval;
1034
1035         if (target_mode != ARM_MODE_ANY)
1036                 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1037
1038         LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1039         aarch64->system_control_reg_curr = aarch64->system_control_reg;
1040
1041         if (armv8->armv8_mmu.armv8_cache.info == -1) {
1042                 armv8_identify_cache(armv8);
1043                 armv8_read_mpidr(armv8);
1044         }
1045
1046         armv8->armv8_mmu.mmu_enabled =
1047                         (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1048         armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1049                 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1050         armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1051                 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1052         return ERROR_OK;
1053 }
1054
1055 /*
1056  * single-step a target
1057  */
1058 static int aarch64_step(struct target *target, int current, target_addr_t address,
1059         int handle_breakpoints)
1060 {
1061         struct armv8_common *armv8 = target_to_armv8(target);
1062         struct aarch64_common *aarch64 = target_to_aarch64(target);
1063         int saved_retval = ERROR_OK;
1064         int retval;
1065         uint32_t edecr;
1066
1067         if (target->state != TARGET_HALTED) {
1068                 LOG_WARNING("target not halted");
1069                 return ERROR_TARGET_NOT_HALTED;
1070         }
1071
1072         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1073                         armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1074         /* make sure EDECR.SS is not set when restoring the register */
1075
1076         if (retval == ERROR_OK) {
1077                 edecr &= ~0x4;
1078                 /* set EDECR.SS to enter hardware step mode */
1079                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1080                                 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1081         }
1082         /* disable interrupts while stepping */
1083         if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1084                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1085         /* bail out if stepping setup has failed */
1086         if (retval != ERROR_OK)
1087                 return retval;
1088
1089         if (target->smp && (current == 1)) {
1090                 /*
1091                  * isolate current target so that it doesn't get resumed
1092                  * together with the others
1093                  */
1094                 retval = arm_cti_gate_channel(armv8->cti, 1);
1095                 /* resume all other targets in the group */
1096                 if (retval == ERROR_OK)
1097                         retval = aarch64_step_restart_smp(target);
1098                 if (retval != ERROR_OK) {
1099                         LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1100                         return retval;
1101                 }
1102                 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1103         }
1104
1105         /* all other targets running, restore and restart the current target */
1106         retval = aarch64_restore_one(target, current, &address, 0, 0);
1107         if (retval == ERROR_OK)
1108                 retval = aarch64_restart_one(target, RESTART_LAZY);
1109
1110         if (retval != ERROR_OK)
1111                 return retval;
1112
1113         LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1114         if (!handle_breakpoints)
1115                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1116
1117         int64_t then = timeval_ms();
1118         for (;;) {
1119                 int stepped;
1120                 uint32_t prsr;
1121
1122                 retval = aarch64_check_state_one(target,
1123                                         PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1124                 if (retval != ERROR_OK || stepped)
1125                         break;
1126
1127                 if (timeval_ms() > then + 100) {
1128                         LOG_ERROR("timeout waiting for target %s halt after step",
1129                                         target_name(target));
1130                         retval = ERROR_TARGET_TIMEOUT;
1131                         break;
1132                 }
1133         }
1134
1135         /*
1136          * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1137          * causes a timeout. The core takes the step but doesn't complete it and so
1138          * debug state is never entered. However, you can manually halt the core
1139          * as an external debug even is also a WFI wakeup event.
1140          * since an external debug request is also a WFI wake-up event.
1141         if (retval == ERROR_TARGET_TIMEOUT)
1142                 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1143
1144         /* restore EDECR */
1145         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1146                         armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1147         if (retval != ERROR_OK)
1148                 return retval;
1149
1150         /* restore interrupts */
1151         if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1152                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1153                 if (retval != ERROR_OK)
1154                         return retval;
1155         }
1156
1157         if (saved_retval != ERROR_OK)
1158                 return saved_retval;
1159
1160         return aarch64_poll(target);
1161 }
1162
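/*
 * Flush all dirty registers back to the PE through the DPM and invalidate the
 * register caches afterwards; the bpwp flag is forwarded to
 * armv8_dpm_write_dirty_registers().
 */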
1163 static int aarch64_restore_context(struct target *target, bool bpwp)
1164 {
1165         struct armv8_common *armv8 = target_to_armv8(target);
1166         struct arm *arm = &armv8->arm;
1167
1168         int retval;
1169
1170         LOG_DEBUG("%s", target_name(target));
1171
1172         if (armv8->pre_restore_context)
1173                 armv8->pre_restore_context(target);
1174
1175         retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1176         if (retval == ERROR_OK) {
1177                 /* registers are now invalid */
1178                 register_cache_invalidate(arm->core_cache);
1179                 register_cache_invalidate(arm->core_cache->next);
1180         }
1181
1182         return retval;
1183 }
1184
1185 /*
1186  * AArch64 breakpoint and watchpoint functions
1187  */
1188
1189 /* Setup hardware Breakpoint Register Pair */
1190 static int aarch64_set_breakpoint(struct target *target,
1191         struct breakpoint *breakpoint, uint8_t matchmode)
1192 {
1193         int retval;
1194         int brp_i = 0;
1195         uint32_t control;
1196         uint8_t byte_addr_select = 0x0F;
1197         struct aarch64_common *aarch64 = target_to_aarch64(target);
1198         struct armv8_common *armv8 = &aarch64->armv8_common;
1199         struct aarch64_brp *brp_list = aarch64->brp_list;
1200
1201         if (breakpoint->set) {
1202                 LOG_WARNING("breakpoint already set");
1203                 return ERROR_OK;
1204         }
1205
1206         if (breakpoint->type == BKPT_HARD) {
1207                 int64_t bpt_value;
1208                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1209                         brp_i++;
1210                 if (brp_i >= aarch64->brp_num) {
1211                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1212                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1213                 }
1214                 breakpoint->set = brp_i + 1;
1215                 if (breakpoint->length == 2)
1216                         byte_addr_select = (3 << (breakpoint->address & 0x02));
1217                 control = ((matchmode & 0x7) << 20)
1218                         | (1 << 13)
1219                         | (byte_addr_select << 5)
1220                         | (3 << 1) | 1;
1221                 brp_list[brp_i].used = 1;
1222                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1223                 brp_list[brp_i].control = control;
1224                 bpt_value = brp_list[brp_i].value;
1225
1226                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1227                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1228                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1229                 if (retval != ERROR_OK)
1230                         return retval;
1231                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1232                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1233                                 (uint32_t)(bpt_value >> 32));
1234                 if (retval != ERROR_OK)
1235                         return retval;
1236
1237                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1238                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1239                                 brp_list[brp_i].control);
1240                 if (retval != ERROR_OK)
1241                         return retval;
1242                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1243                         brp_list[brp_i].control,
1244                         brp_list[brp_i].value);
1245
1246         } else if (breakpoint->type == BKPT_SOFT) {
1247                 uint8_t code[4];
1248
1249                 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1250                 retval = target_read_memory(target,
1251                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1252                                 breakpoint->length, 1,
1253                                 breakpoint->orig_instr);
1254                 if (retval != ERROR_OK)
1255                         return retval;
1256
1257                 armv8_cache_d_inner_flush_virt(armv8,
1258                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1259                                 breakpoint->length);
1260
1261                 retval = target_write_memory(target,
1262                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1263                                 breakpoint->length, 1, code);
1264                 if (retval != ERROR_OK)
1265                         return retval;
1266
1267                 armv8_cache_d_inner_flush_virt(armv8,
1268                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1269                                 breakpoint->length);
1270
1271                 armv8_cache_i_inner_inval_virt(armv8,
1272                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1273                                 breakpoint->length);
1274
1275                 breakpoint->set = 0x11; /* Any nice value but 0 */
1276         }
1277
1278         /* Ensure that halting debug mode is enabled */
1279         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1280         if (retval != ERROR_OK) {
1281                 LOG_DEBUG("Failed to set DSCR.HDE");
1282                 return retval;
1283         }
1284
1285         return ERROR_OK;
1286 }
1287
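/*
 * Set up a context-matching breakpoint: the BRP compares the context ID given
 * in breakpoint->asid instead of an instruction address.
 */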
1288 static int aarch64_set_context_breakpoint(struct target *target,
1289         struct breakpoint *breakpoint, uint8_t matchmode)
1290 {
1291         int retval = ERROR_FAIL;
1292         int brp_i = 0;
1293         uint32_t control;
1294         uint8_t byte_addr_select = 0x0F;
1295         struct aarch64_common *aarch64 = target_to_aarch64(target);
1296         struct armv8_common *armv8 = &aarch64->armv8_common;
1297         struct aarch64_brp *brp_list = aarch64->brp_list;
1298
1299         if (breakpoint->set) {
1300                 LOG_WARNING("breakpoint already set");
1301                 return retval;
1302         }
1303         /*check available context BRPs*/
1304         while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1305                 (brp_list[brp_i].type != BRP_CONTEXT)))
1306                 brp_i++;
1307
1308         if (brp_i >= aarch64->brp_num) {
1309                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1310                 return ERROR_FAIL;
1311         }
1312
1313         breakpoint->set = brp_i + 1;
1314         control = ((matchmode & 0x7) << 20)
1315                 | (1 << 13)
1316                 | (byte_addr_select << 5)
1317                 | (3 << 1) | 1;
1318         brp_list[brp_i].used = 1;
1319         brp_list[brp_i].value = (breakpoint->asid);
1320         brp_list[brp_i].control = control;
1321         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1322                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1323                         brp_list[brp_i].value);
1324         if (retval != ERROR_OK)
1325                 return retval;
1326         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1327                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1328                         brp_list[brp_i].control);
1329         if (retval != ERROR_OK)
1330                 return retval;
1331         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1332                 brp_list[brp_i].control,
1333                 brp_list[brp_i].value);
1334         return ERROR_OK;
1335
1336 }
1337
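/*
 * Set up a hybrid (linked) breakpoint: a context-matching BRP holding the ASID
 * is linked to an address-matching BRP holding the IVA, so the PE only halts
 * when both match.
 */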
1338 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1339 {
1340         int retval = ERROR_FAIL;
1341         int brp_1 = 0;  /* holds the contextID pair */
1342         int brp_2 = 0;  /* holds the IVA pair */
1343         uint32_t control_CTX, control_IVA;
1344         uint8_t CTX_byte_addr_select = 0x0F;
1345         uint8_t IVA_byte_addr_select = 0x0F;
1346         uint8_t CTX_machmode = 0x03;
1347         uint8_t IVA_machmode = 0x01;
1348         struct aarch64_common *aarch64 = target_to_aarch64(target);
1349         struct armv8_common *armv8 = &aarch64->armv8_common;
1350         struct aarch64_brp *brp_list = aarch64->brp_list;
1351
1352         if (breakpoint->set) {
1353                 LOG_WARNING("breakpoint already set");
1354                 return retval;
1355         }
1356         /*check available context BRPs*/
1357         while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1358                 (brp_list[brp_1].type != BRP_CONTEXT)))
1359                 brp_1++;
1360
1361         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1362         if (brp_1 >= aarch64->brp_num) {
1363                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1364                 return ERROR_FAIL;
1365         }
1366
1367         while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1368                 (brp_list[brp_2].type != BRP_NORMAL)))
1369                 brp_2++;
1370
1371         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1372         if (brp_2 >= aarch64->brp_num) {
1373                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1374                 return ERROR_FAIL;
1375         }
1376
1377         breakpoint->set = brp_1 + 1;
1378         breakpoint->linked_BRP = brp_2;
1379         control_CTX = ((CTX_machmode & 0x7) << 20)
1380                 | (brp_2 << 16)
1381                 | (0 << 14)
1382                 | (CTX_byte_addr_select << 5)
1383                 | (3 << 1) | 1;
1384         brp_list[brp_1].used = 1;
1385         brp_list[brp_1].value = (breakpoint->asid);
1386         brp_list[brp_1].control = control_CTX;
1387         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1388                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1389                         brp_list[brp_1].value);
1390         if (retval != ERROR_OK)
1391                 return retval;
1392         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1393                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1394                         brp_list[brp_1].control);
1395         if (retval != ERROR_OK)
1396                 return retval;
1397
1398         control_IVA = ((IVA_machmode & 0x7) << 20)
1399                 | (brp_1 << 16)
1400                 | (1 << 13)
1401                 | (IVA_byte_addr_select << 5)
1402                 | (3 << 1) | 1;
1403         brp_list[brp_2].used = 1;
1404         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1405         brp_list[brp_2].control = control_IVA;
1406         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1407                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1408                         brp_list[brp_2].value & 0xFFFFFFFF);
1409         if (retval != ERROR_OK)
1410                 return retval;
1411         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1412                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1413                         brp_list[brp_2].value >> 32);
1414         if (retval != ERROR_OK)
1415                 return retval;
1416         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1417                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1418                         brp_list[brp_2].control);
1419         if (retval != ERROR_OK)
1420                 return retval;
1421
1422         return ERROR_OK;
1423 }
1424
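/*
 * Clear a previously set breakpoint: for hardware breakpoints the BCR/BVR pair
 * (both pairs for a hybrid breakpoint) is zeroed; for software breakpoints the
 * original instruction is written back and the caches are cleaned/invalidated.
 */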
1425 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1426 {
1427         int retval;
1428         struct aarch64_common *aarch64 = target_to_aarch64(target);
1429         struct armv8_common *armv8 = &aarch64->armv8_common;
1430         struct aarch64_brp *brp_list = aarch64->brp_list;
1431
1432         if (!breakpoint->set) {
1433                 LOG_WARNING("breakpoint not set");
1434                 return ERROR_OK;
1435         }
1436
1437         if (breakpoint->type == BKPT_HARD) {
1438                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1439                         int brp_i = breakpoint->set - 1;
1440                         int brp_j = breakpoint->linked_BRP;
1441                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1442                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1443                                 return ERROR_OK;
1444                         }
1445                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1446                                 brp_list[brp_i].control, brp_list[brp_i].value);
1447                         brp_list[brp_i].used = 0;
1448                         brp_list[brp_i].value = 0;
1449                         brp_list[brp_i].control = 0;
1450                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1451                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1452                                         brp_list[brp_i].control);
1453                         if (retval != ERROR_OK)
1454                                 return retval;
1455                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1456                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1457                                         (uint32_t)brp_list[brp_i].value);
1458                         if (retval != ERROR_OK)
1459                                 return retval;
1460                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1461                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1462                                         (uint32_t)brp_list[brp_i].value);
1463                         if (retval != ERROR_OK)
1464                                 return retval;
1465                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1466                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1467                                 return ERROR_OK;
1468                         }
1469                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1470                                 brp_list[brp_j].control, brp_list[brp_j].value);
1471                         brp_list[brp_j].used = 0;
1472                         brp_list[brp_j].value = 0;
1473                         brp_list[brp_j].control = 0;
1474                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1475                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1476                                         brp_list[brp_j].control);
1477                         if (retval != ERROR_OK)
1478                                 return retval;
1479                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1480                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1481                                         (uint32_t)brp_list[brp_j].value);
1482                         if (retval != ERROR_OK)
1483                                 return retval;
1484                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1485                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1486                                         (uint32_t)brp_list[brp_j].value);
1487                         if (retval != ERROR_OK)
1488                                 return retval;
1489
1490                         breakpoint->linked_BRP = 0;
1491                         breakpoint->set = 0;
1492                         return ERROR_OK;
1493
1494                 } else {
1495                         int brp_i = breakpoint->set - 1;
1496                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1497                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1498                                 return ERROR_OK;
1499                         }
1500                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1501                                 brp_list[brp_i].control, brp_list[brp_i].value);
1502                         brp_list[brp_i].used = 0;
1503                         brp_list[brp_i].value = 0;
1504                         brp_list[brp_i].control = 0;
1505                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1506                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1507                                         brp_list[brp_i].control);
1508                         if (retval != ERROR_OK)
1509                                 return retval;
1510                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1511                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1512                                         brp_list[brp_i].value);
1513                         if (retval != ERROR_OK)
1514                                 return retval;
1515
1516                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1517                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1518                                         (uint32_t)brp_list[brp_i].value);
1519                         if (retval != ERROR_OK)
1520                                 return retval;
1521                         breakpoint->set = 0;
1522                         return ERROR_OK;
1523                 }
1524         } else {
1525                 /* restore original instruction (kept in target endianness) */
1526
1527                 armv8_cache_d_inner_flush_virt(armv8,
1528                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1529                                 breakpoint->length);
1530
1531                 if (breakpoint->length == 4) {
1532                         retval = target_write_memory(target,
1533                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1534                                         4, 1, breakpoint->orig_instr);
1535                         if (retval != ERROR_OK)
1536                                 return retval;
1537                 } else {
1538                         retval = target_write_memory(target,
1539                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1540                                         2, 1, breakpoint->orig_instr);
1541                         if (retval != ERROR_OK)
1542                                 return retval;
1543                 }
1544
1545                 armv8_cache_d_inner_flush_virt(armv8,
1546                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1547                                 breakpoint->length);
1548
1549                 armv8_cache_i_inner_inval_virt(armv8,
1550                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1551                                 breakpoint->length);
1552         }
1553         breakpoint->set = 0;
1554
1555         return ERROR_OK;
1556 }
1557
1558 static int aarch64_add_breakpoint(struct target *target,
1559         struct breakpoint *breakpoint)
1560 {
1561         struct aarch64_common *aarch64 = target_to_aarch64(target);
1562
1563         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1564                 LOG_INFO("no hardware breakpoint available");
1565                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1566         }
1567
1568         if (breakpoint->type == BKPT_HARD)
1569                 aarch64->brp_num_available--;
1570
1571         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1572 }
1573
1574 static int aarch64_add_context_breakpoint(struct target *target,
1575         struct breakpoint *breakpoint)
1576 {
1577         struct aarch64_common *aarch64 = target_to_aarch64(target);
1578
1579         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1580                 LOG_INFO("no hardware breakpoint available");
1581                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1582         }
1583
1584         if (breakpoint->type == BKPT_HARD)
1585                 aarch64->brp_num_available--;
1586
1587         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1588 }
1589
1590 static int aarch64_add_hybrid_breakpoint(struct target *target,
1591         struct breakpoint *breakpoint)
1592 {
1593         struct aarch64_common *aarch64 = target_to_aarch64(target);
1594
1595         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1596                 LOG_INFO("no hardware breakpoint available");
1597                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1598         }
1599
1600         if (breakpoint->type == BKPT_HARD)
1601                 aarch64->brp_num_available--;
1602
1603         return aarch64_set_hybrid_breakpoint(target, breakpoint);       /* context ID + address match */
1604 }
1605
1606
1607 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1608 {
1609         struct aarch64_common *aarch64 = target_to_aarch64(target);
1610
1611 #if 0
1612 /* It is perfectly possible to remove breakpoints while the target is running */
1613         if (target->state != TARGET_HALTED) {
1614                 LOG_WARNING("target not halted");
1615                 return ERROR_TARGET_NOT_HALTED;
1616         }
1617 #endif
1618
1619         if (breakpoint->set) {
1620                 aarch64_unset_breakpoint(target, breakpoint);
1621                 if (breakpoint->type == BKPT_HARD)
1622                         aarch64->brp_num_available++;
1623         }
1624
1625         return ERROR_OK;
1626 }
1627
1628 /*
1629  * aarch64 reset functions
1630  */
1631
1632 static int aarch64_assert_reset(struct target *target)
1633 {
1634         struct armv8_common *armv8 = target_to_armv8(target);
1635
1636         LOG_DEBUG(" ");
1637
1638         /* FIXME when halt is requested, make it work somehow... */
1639
1640         /* Issue some kind of warm reset. */
1641         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1642                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1643         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1644                 /* REVISIT handle "pulls" cases, if there's
1645                  * hardware that needs them to work.
1646                  */
1647                 jtag_add_reset(0, 1);
1648         } else {
1649                 LOG_ERROR("%s: how to reset?", target_name(target));
1650                 return ERROR_FAIL;
1651         }
1652
1653         /* registers are now invalid */
1654         if (target_was_examined(target)) {
1655                 register_cache_invalidate(armv8->arm.core_cache);
1656                 register_cache_invalidate(armv8->arm.core_cache->next);
1657         }
1658
1659         target->state = TARGET_RESET;
1660
1661         return ERROR_OK;
1662 }
1663
1664 static int aarch64_deassert_reset(struct target *target)
1665 {
1666         int retval;
1667
1668         LOG_DEBUG(" ");
1669
1670         /* be certain SRST is off */
1671         jtag_add_reset(0, 0);
1672
1673         if (!target_was_examined(target))
1674                 return ERROR_OK;
1675
1676         retval = aarch64_poll(target);
1677         if (retval != ERROR_OK)
1678                 return retval;
1679
1680         if (target->reset_halt) {
1681                 if (target->state != TARGET_HALTED) {
1682                         LOG_WARNING("%s: ran after reset and before halt ...",
1683                                 target_name(target));
1684                         retval = target_halt(target);
1685                         if (retval != ERROR_OK)
1686                                 return retval;
1687                 }
1688         }
1689
1690         return aarch64_init_debug_access(target);
1691 }
1692
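/*
 * Slow path for CPU memory writes: each element is pushed through DTRRX and
 * stored by a store instruction issued through the ITR, using X1/R1 as the data
 * scratch register. Handles 8/16/32-bit element sizes and unaligned buffers.
 */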
1693 static int aarch64_write_cpu_memory_slow(struct target *target,
1694         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1695 {
1696         struct armv8_common *armv8 = target_to_armv8(target);
1697         struct arm_dpm *dpm = &armv8->dpm;
1698         struct arm *arm = &armv8->arm;
1699         int retval;
1700
1701         armv8_reg_current(arm, 1)->dirty = true;
1702
1703         /* change DCC to normal mode if necessary */
1704         if (*dscr & DSCR_MA) {
1705                 *dscr &= ~DSCR_MA;
1706                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1707                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1708                 if (retval != ERROR_OK)
1709                         return retval;
1710         }
1711
1712         while (count) {
1713                 uint32_t data, opcode;
1714
1715                 /* write the data to store into DTRRX */
1716                 if (size == 1)
1717                         data = *buffer;
1718                 else if (size == 2)
1719                         data = target_buffer_get_u16(target, buffer);
1720                 else
1721                         data = target_buffer_get_u32(target, buffer);
1722                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1723                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1724                 if (retval != ERROR_OK)
1725                         return retval;
1726
1727                 if (arm->core_state == ARM_STATE_AARCH64)
1728                         retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1729                 else
1730                         retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1731                 if (retval != ERROR_OK)
1732                         return retval;
1733
1734                 if (size == 1)
1735                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1736                 else if (size == 2)
1737                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1738                 else
1739                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1740                 retval = dpm->instr_execute(dpm, opcode);
1741                 if (retval != ERROR_OK)
1742                         return retval;
1743
1744                 /* Advance */
1745                 buffer += size;
1746                 --count;
1747         }
1748
1749         return ERROR_OK;
1750 }
1751
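/*
 * Fast path for 32-bit aligned writes: with the DCC in memory-access mode every
 * write to DTRRX stores a word at [X0] and advances X0 by 4, so the whole buffer
 * can be streamed with a single non-incrementing MEM-AP transfer.
 */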
1752 static int aarch64_write_cpu_memory_fast(struct target *target,
1753         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1754 {
1755         struct armv8_common *armv8 = target_to_armv8(target);
1756         struct arm *arm = &armv8->arm;
1757         int retval;
1758
1759         armv8_reg_current(arm, 1)->dirty = true;
1760
1761         /* Step 1.d   - Change DCC to memory mode */
1762         *dscr |= DSCR_MA;
1763         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1764                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1765         if (retval != ERROR_OK)
1766                 return retval;
1767
1768
1769         /* Step 2.a   - Do the write */
1770         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1771                                         buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1772         if (retval != ERROR_OK)
1773                 return retval;
1774
1775         /* Step 3.a   - Switch DTR mode back to Normal mode */
1776         *dscr &= ~DSCR_MA;
1777         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1778                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1779         if (retval != ERROR_OK)
1780                 return retval;
1781
1782         return ERROR_OK;
1783 }
1784
1785 static int aarch64_write_cpu_memory(struct target *target,
1786         uint64_t address, uint32_t size,
1787         uint32_t count, const uint8_t *buffer)
1788 {
1789         /* write memory through APB-AP */
1790         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1791         struct armv8_common *armv8 = target_to_armv8(target);
1792         struct arm_dpm *dpm = &armv8->dpm;
1793         struct arm *arm = &armv8->arm;
1794         uint32_t dscr;
1795
1796         if (target->state != TARGET_HALTED) {
1797                 LOG_WARNING("target not halted");
1798                 return ERROR_TARGET_NOT_HALTED;
1799         }
1800
1801         /* Mark register X0 as dirty, as it will be used
1802          * for transferring the data.
1803          * It will be restored automatically when exiting
1804          * debug mode
1805          */
1806         armv8_reg_current(arm, 0)->dirty = true;
1807
1808         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1809
1810         /* Read DSCR */
1811         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1812                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1813         if (retval != ERROR_OK)
1814                 return retval;
1815
1816         /* Set Normal access mode  */
1817         dscr = (dscr & ~DSCR_MA);
1818         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1819                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1820         if (retval != ERROR_OK)
1821                 return retval;
1822
1823         if (arm->core_state == ARM_STATE_AARCH64) {
1824                 /* Write X0 with value 'address' using write procedure */
1825                 /* Step 1.a+b - Write the address for write access into DBGDTR_EL0 */
1826                 /* Step 1.c   - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1827                 retval = dpm->instr_write_data_dcc_64(dpm,
1828                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1829         } else {
1830                 /* Write R0 with value 'address' using write procedure */
1831                 /* Step 1.a+b - Write the address for write access into DBGDTRRX */
1832                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1833                 retval = dpm->instr_write_data_dcc(dpm,
1834                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1835         }
1836
1837         if (retval != ERROR_OK)
1838                 return retval;
1839
1840         if (size == 4 && (address % 4) == 0)
1841                 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1842         else
1843                 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1844
1845         if (retval != ERROR_OK) {
1846                 /* Unset DTR mode */
1847                 mem_ap_read_atomic_u32(armv8->debug_ap,
1848                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1849                 dscr &= ~DSCR_MA;
1850                 mem_ap_write_atomic_u32(armv8->debug_ap,
1851                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1852         }
1853
1854         /* Check for sticky abort flags in the DSCR */
1855         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1856                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1857         if (retval != ERROR_OK)
1858                 return retval;
1859
1860         dpm->dscr = dscr;
1861         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1862                 /* Abort occurred - clear it and exit */
1863                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1864                 armv8_dpm_handle_exception(dpm, true);
1865                 return ERROR_FAIL;
1866         }
1867
1868         /* Done */
1869         return ERROR_OK;
1870 }
1871
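/*
 * Slow path for CPU memory reads: for each element a load instruction is issued
 * through the ITR, the loaded value is moved to DTRTX and then read back over
 * the DCC. Handles 8/16/32-bit element sizes and unaligned buffers.
 */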
1872 static int aarch64_read_cpu_memory_slow(struct target *target,
1873         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1874 {
1875         struct armv8_common *armv8 = target_to_armv8(target);
1876         struct arm_dpm *dpm = &armv8->dpm;
1877         struct arm *arm = &armv8->arm;
1878         int retval;
1879
1880         armv8_reg_current(arm, 1)->dirty = true;
1881
1882         /* change DCC to normal mode (if necessary) */
1883         if (*dscr & DSCR_MA) {
1884                 *dscr &= ~DSCR_MA;
1885                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1886                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1887                 if (retval != ERROR_OK)
1888                         return retval;
1889         }
1890
1891         while (count) {
1892                 uint32_t opcode, data;
1893
1894                 if (size == 1)
1895                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1896                 else if (size == 2)
1897                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1898                 else
1899                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1900                 retval = dpm->instr_execute(dpm, opcode);
1901                 if (retval != ERROR_OK)
1902                         return retval;
1903
1904                 if (arm->core_state == ARM_STATE_AARCH64)
1905                         retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1906                 else
1907                         retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1908                 if (retval != ERROR_OK)
1909                         return retval;
1910
1911                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1912                                 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1913                 if (retval != ERROR_OK)
1914                         return retval;
1915
1916                 if (size == 1)
1917                         *buffer = (uint8_t)data;
1918                 else if (size == 2)
1919                         target_buffer_set_u16(target, buffer, (uint16_t)data);
1920                 else
1921                         target_buffer_set_u32(target, buffer, data);
1922
1923                 /* Advance */
1924                 buffer += size;
1925                 --count;
1926         }
1927
1928         return ERROR_OK;
1929 }
1930
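/*
 * Fast path for 32-bit aligned reads: in memory-access mode each read of DTRTX
 * returns the word at [X0] and re-issues the load with X0 incremented by 4. The
 * final word is read only after leaving memory-access mode so that no extra
 * load is triggered past the end of the buffer.
 */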
1931 static int aarch64_read_cpu_memory_fast(struct target *target,
1932         uint32_t count, uint8_t *buffer, uint32_t *dscr)
1933 {
1934         struct armv8_common *armv8 = target_to_armv8(target);
1935         struct arm_dpm *dpm = &armv8->dpm;
1936         struct arm *arm = &armv8->arm;
1937         int retval;
1938         uint32_t value;
1939
1940         /* Mark X1 as dirty */
1941         armv8_reg_current(arm, 1)->dirty = true;
1942
1943         if (arm->core_state == ARM_STATE_AARCH64) {
1944                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1945                 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1946         } else {
1947                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1948                 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1949         }
1950
1951         if (retval != ERROR_OK)
1952                 return retval;
1953
1954         /* Step 1.e - Change DCC to memory mode */
1955         *dscr |= DSCR_MA;
1956         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1957                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1958         if (retval != ERROR_OK)
1959                 return retval;
1960
1961         /* Step 1.f - read DBGDTRTX and discard the value */
1962         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1963                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1964         if (retval != ERROR_OK)
1965                 return retval;
1966
1967         count--;
1968         /* Read the data - each read of the DTRTX register causes the load to be
1969          * reissued. Abort flags are sticky, so they can be checked at the end of
1970          * the transfer.
1971          * The data is read in 32-bit aligned chunks.
1972          */
1973
1974         if (count) {
1975                 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1976                  * increments X0 by 4. */
1977                 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1978                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
1979                 if (retval != ERROR_OK)
1980                         return retval;
1981         }
1982
1983         /* Step 3.a - set DTR access mode back to Normal mode   */
1984         *dscr &= ~DSCR_MA;
1985         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1986                                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1987         if (retval != ERROR_OK)
1988                 return retval;
1989
1990         /* Step 3.b - read DBGDTRTX for the final value */
1991         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1992                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1993         if (retval != ERROR_OK)
1994                 return retval;
1995
1996         target_buffer_set_u32(target, buffer + count * 4, value);
1997         return retval;
1998 }
1999
2000 static int aarch64_read_cpu_memory(struct target *target,
2001         target_addr_t address, uint32_t size,
2002         uint32_t count, uint8_t *buffer)
2003 {
2004         /* read memory through APB-AP */
2005         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2006         struct armv8_common *armv8 = target_to_armv8(target);
2007         struct arm_dpm *dpm = &armv8->dpm;
2008         struct arm *arm = &armv8->arm;
2009         uint32_t dscr;
2010
2011         LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2012                         address, size, count);
2013
2014         if (target->state != TARGET_HALTED) {
2015                 LOG_WARNING("target not halted");
2016                 return ERROR_TARGET_NOT_HALTED;
2017         }
2018
2019         /* Mark register X0 as dirty, as it will be used
2020          * for transferring the data.
2021          * It will be restored automatically when exiting
2022          * debug mode
2023          */
2024         armv8_reg_current(arm, 0)->dirty = true;
2025
2026         /* Read DSCR */
2027         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2028                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2029         if (retval != ERROR_OK)
2030                 return retval;
2031
2032         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2033
2034         /* Set Normal access mode  */
2035         dscr &= ~DSCR_MA;
2036         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2037                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2038         if (retval != ERROR_OK)
2039                 return retval;
2040
2041         if (arm->core_state == ARM_STATE_AARCH64) {
2042                 /* Write X0 with value 'address' using write procedure */
2043                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2044                 /* Step 1.c   - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2045                 retval = dpm->instr_write_data_dcc_64(dpm,
2046                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2047         } else {
2048                 /* Write R0 with value 'address' using write procedure */
2049                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2050                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2051                 retval = dpm->instr_write_data_dcc(dpm,
2052                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2053         }
2054
2055         if (retval != ERROR_OK)
2056                 return retval;
2057
2058         if (size == 4 && (address % 4) == 0)
2059                 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2060         else
2061                 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2062
2063         if (dscr & DSCR_MA) {
2064                 dscr &= ~DSCR_MA;
2065                 mem_ap_write_atomic_u32(armv8->debug_ap,
2066                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2067         }
2068
2069         if (retval != ERROR_OK)
2070                 return retval;
2071
2072         /* Check for sticky abort flags in the DSCR */
2073         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2074                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2075         if (retval != ERROR_OK)
2076                 return retval;
2077
2078         dpm->dscr = dscr;
2079
2080         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2081                 /* Abort occurred - clear it and exit */
2082                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2083                 armv8_dpm_handle_exception(dpm, true);
2084                 return ERROR_FAIL;
2085         }
2086
2087         /* Done */
2088         return ERROR_OK;
2089 }
2090
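/* Physical memory accesses use the same APB-AP path, but with address
 * translation temporarily disabled via aarch64_mmu_modify(). */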
2091 static int aarch64_read_phys_memory(struct target *target,
2092         target_addr_t address, uint32_t size,
2093         uint32_t count, uint8_t *buffer)
2094 {
2095         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2096
2097         if (count && buffer) {
2098                 /* read memory through APB-AP */
2099                 retval = aarch64_mmu_modify(target, 0);
2100                 if (retval != ERROR_OK)
2101                         return retval;
2102                 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2103         }
2104         return retval;
2105 }
2106
2107 static int aarch64_read_memory(struct target *target, target_addr_t address,
2108         uint32_t size, uint32_t count, uint8_t *buffer)
2109 {
2110         int mmu_enabled = 0;
2111         int retval;
2112
2113         /* determine if MMU was enabled on target stop */
2114         retval = aarch64_mmu(target, &mmu_enabled);
2115         if (retval != ERROR_OK)
2116                 return retval;
2117
2118         if (mmu_enabled) {
2119                 /* enable MMU as we could have disabled it for phys access */
2120                 retval = aarch64_mmu_modify(target, 1);
2121                 if (retval != ERROR_OK)
2122                         return retval;
2123         }
2124         return aarch64_read_cpu_memory(target, address, size, count, buffer);
2125 }
2126
2127 static int aarch64_write_phys_memory(struct target *target,
2128         target_addr_t address, uint32_t size,
2129         uint32_t count, const uint8_t *buffer)
2130 {
2131         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2132
2133         if (count && buffer) {
2134                 /* write memory through APB-AP */
2135                 retval = aarch64_mmu_modify(target, 0);
2136                 if (retval != ERROR_OK)
2137                         return retval;
2138                 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2139         }
2140
2141         return retval;
2142 }
2143
2144 static int aarch64_write_memory(struct target *target, target_addr_t address,
2145         uint32_t size, uint32_t count, const uint8_t *buffer)
2146 {
2147         int mmu_enabled = 0;
2148         int retval;
2149
2150         /* determine if MMU was enabled on target stop */
2151         retval = aarch64_mmu(target, &mmu_enabled);
2152         if (retval != ERROR_OK)
2153                 return retval;
2154
2155         if (mmu_enabled) {
2156                 /* enable MMU as we could have disabled it for phys access */
2157                 retval = aarch64_mmu_modify(target, 1);
2158                 if (retval != ERROR_OK)
2159                         return retval;
2160         }
2161         return aarch64_write_cpu_memory(target, address, size, count, buffer);
2162 }
2163
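/*
 * Timer callback: while the target is running, drain debug messages that the
 * target firmware pushes through DTRTX and hand them to the target_request
 * layer.
 */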
2164 static int aarch64_handle_target_request(void *priv)
2165 {
2166         struct target *target = priv;
2167         struct armv8_common *armv8 = target_to_armv8(target);
2168         int retval;
2169
2170         if (!target_was_examined(target))
2171                 return ERROR_OK;
2172         if (!target->dbg_msg_enabled)
2173                 return ERROR_OK;
2174
2175         if (target->state == TARGET_RUNNING) {
2176                 uint32_t request;
2177                 uint32_t dscr;
2178                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2179                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2180
2181                 /* check if we have data */
2182                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2183                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2184                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2185                         if (retval == ERROR_OK) {
2186                                 target_request(target, request);
2187                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2188                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2189                         }
2190                 }
2191         }
2192
2193         return ERROR_OK;
2194 }
2195
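/*
 * First-time examination: initialize the DAP, locate the APB-AP and the core's
 * debug base address from the ROM table, read the ID registers, create the CTI
 * and set up the breakpoint register pair bookkeeping.
 */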
2196 static int aarch64_examine_first(struct target *target)
2197 {
2198         struct aarch64_common *aarch64 = target_to_aarch64(target);
2199         struct armv8_common *armv8 = &aarch64->armv8_common;
2200         struct adiv5_dap *swjdp = armv8->arm.dap;
2201         uint32_t cti_base;
2202         int i;
2203         int retval = ERROR_OK;
2204         uint64_t debug, ttypr;
2205         uint32_t cpuid;
2206         uint32_t tmp0, tmp1, tmp2, tmp3;
2207         debug = ttypr = cpuid = 0;
2208
2209         retval = dap_dp_init(swjdp);
2210         if (retval != ERROR_OK)
2211                 return retval;
2212
2213         /* Search for the APB-AP - it is needed for access to debug registers */
2214         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2215         if (retval != ERROR_OK) {
2216                 LOG_ERROR("Could not find APB-AP for debug access");
2217                 return retval;
2218         }
2219
2220         retval = mem_ap_init(armv8->debug_ap);
2221         if (retval != ERROR_OK) {
2222                 LOG_ERROR("Could not initialize the APB-AP");
2223                 return retval;
2224         }
2225
2226         armv8->debug_ap->memaccess_tck = 10;
2227
2228         if (!target->dbgbase_set) {
2229                 uint32_t dbgbase;
2230                 /* Get ROM Table base */
2231                 uint32_t apid;
2232                 int32_t coreidx = target->coreid;
2233                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2234                 if (retval != ERROR_OK)
2235                         return retval;
2236                 /* Lookup 0x15 -- Processor DAP */
2237                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2238                                 &armv8->debug_base, &coreidx);
2239                 if (retval != ERROR_OK)
2240                         return retval;
2241                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2242                                 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2243         } else
2244                 armv8->debug_base = target->dbgbase;
2245
2246         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2247                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2248         if (retval != ERROR_OK) {
2249                 LOG_DEBUG("Examine %s failed", "oslock");
2250                 return retval;
2251         }
2252
2253         retval = mem_ap_read_u32(armv8->debug_ap,
2254                         armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2255         if (retval != ERROR_OK) {
2256                 LOG_DEBUG("Examine %s failed", "CPUID");
2257                 return retval;
2258         }
2259
2260         retval = mem_ap_read_u32(armv8->debug_ap,
2261                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2262         retval += mem_ap_read_u32(armv8->debug_ap,
2263                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2264         if (retval != ERROR_OK) {
2265                 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2266                 return retval;
2267         }
2268         retval = mem_ap_read_u32(armv8->debug_ap,
2269                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2270         retval += mem_ap_read_u32(armv8->debug_ap,
2271                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2272         if (retval != ERROR_OK) {
2273                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2274                 return retval;
2275         }
2276
2277         retval = dap_run(armv8->debug_ap->dap);
2278         if (retval != ERROR_OK) {
2279                 LOG_ERROR("%s: examination failed", target_name(target));
2280                 return retval;
2281         }
2282
2283         ttypr |= tmp1;
2284         ttypr = (ttypr << 32) | tmp0;
2285         debug |= tmp3;
2286         debug = (debug << 32) | tmp2;
2287
2288         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2289         LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2290         LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2291
2292         if (target->ctibase == 0) {
2293                 /* assume a v8 rom table layout */
2294                 cti_base = armv8->debug_base + 0x10000;
2295                 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, cti_base);
2296         } else
2297                 cti_base = target->ctibase;
2298
2299         armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
2300         if (armv8->cti == NULL)
2301                 return ERROR_FAIL;
2302
2303         retval = aarch64_dpm_setup(aarch64, debug);
2304         if (retval != ERROR_OK)
2305                 return retval;
2306
2307         /* Setup Breakpoint Register Pairs */
2308         aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2309         aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2310         aarch64->brp_num_available = aarch64->brp_num;
2311         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2312         for (i = 0; i < aarch64->brp_num; i++) {
2313                 aarch64->brp_list[i].used = 0;
2314                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2315                         aarch64->brp_list[i].type = BRP_NORMAL;
2316                 else
2317                         aarch64->brp_list[i].type = BRP_CONTEXT;
2318                 aarch64->brp_list[i].value = 0;
2319                 aarch64->brp_list[i].control = 0;
2320                 aarch64->brp_list[i].BRPn = i;
2321         }
2322
2323         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2324
2325         target->state = TARGET_UNKNOWN;
2326         target->debug_reason = DBG_REASON_NOTHALTED;
2327         aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2328         target_set_examined(target);
2329         return ERROR_OK;
2330 }
2331
2332 static int aarch64_examine(struct target *target)
2333 {
2334         int retval = ERROR_OK;
2335
2336         /* don't re-probe hardware after each reset */
2337         if (!target_was_examined(target))
2338                 retval = aarch64_examine_first(target);
2339
2340         /* Configure core debug access */
2341         if (retval == ERROR_OK)
2342                 retval = aarch64_init_debug_access(target);
2343
2344         return retval;
2345 }
2346
2347 /*
2348  *      aarch64 target creation and initialization
2349  */
2350
2351 static int aarch64_init_target(struct command_context *cmd_ctx,
2352         struct target *target)
2353 {
2354         /* examine_first() does a bunch of this */
2355         return ERROR_OK;
2356 }
2357
2358 static int aarch64_init_arch_info(struct target *target,
2359         struct aarch64_common *aarch64, struct jtag_tap *tap)
2360 {
2361         struct armv8_common *armv8 = &aarch64->armv8_common;
2362
2363         /* Setup struct aarch64_common */
2364         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2365         /* if the tap has no DAP yet, create one */
2366         if (!tap->dap) {
2367                 tap->dap = dap_init();
2368                 tap->dap->tap = tap;
2369         }
2370         armv8->arm.dap = tap->dap;
2371
2372         /* register arch-specific functions */
2373         armv8->examine_debug_reason = NULL;
2374         armv8->post_debug_entry = aarch64_post_debug_entry;
2375         armv8->pre_restore_context = NULL;
2376         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2377
2378         armv8_init_arch_info(target, armv8);
2379         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2380
2381         return ERROR_OK;
2382 }
2383
2384 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2385 {
2386         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2387
2388         return aarch64_init_arch_info(target, aarch64, target->tap);
2389 }
2390
2391 static int aarch64_mmu(struct target *target, int *enabled)
2392 {
2393         if (target->state != TARGET_HALTED) {
2394                 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2395                 return ERROR_TARGET_INVALID;
2396         }
2397
2398         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2399         return ERROR_OK;
2400 }
2401
2402 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2403                              target_addr_t *phys)
2404 {
2405         return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2406 }
2407
2408 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2409 {
2410         struct target *target = get_current_target(CMD_CTX);
2411         struct armv8_common *armv8 = target_to_armv8(target);
2412
2413         return armv8_handle_cache_info_command(CMD_CTX,
2414                         &armv8->armv8_mmu.armv8_cache);
2415 }
2416
2417
2418 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2419 {
2420         struct target *target = get_current_target(CMD_CTX);
2421         if (!target_was_examined(target)) {
2422                 LOG_ERROR("target not examined yet");
2423                 return ERROR_FAIL;
2424         }
2425
2426         return aarch64_init_debug_access(target);
2427 }
2428 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2429 {
2430         struct target *target = get_current_target(CMD_CTX);
2431         /* check target is an smp target */
2432         struct target_list *head;
2433         struct target *curr;
2434         head = target->head;
2435         target->smp = 0;
2436         if (head != (struct target_list *)NULL) {
2437                 while (head != (struct target_list *)NULL) {
2438                         curr = head->target;
2439                         curr->smp = 0;
2440                         head = head->next;
2441                 }
2442                 /* point the gdb service back at this target */
2443                 target->gdb_service->target = target;
2444         }
2445         return ERROR_OK;
2446 }
2447
2448 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2449 {
2450         struct target *target = get_current_target(CMD_CTX);
2451         struct target_list *head;
2452         struct target *curr;
2453         head = target->head;
2454         if (head != (struct target_list *)NULL) {
2455                 target->smp = 1;
2456                 while (head != (struct target_list *)NULL) {
2457                         curr = head->target;
2458                         curr->smp = 1;
2459                         head = head->next;
2460                 }
2461         }
2462         return ERROR_OK;
2463 }
2464
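/*
 * Typical usage from the OpenOCD console (illustrative):
 *   aarch64 maskisr on     - mask interrupts while single-stepping
 *   aarch64 maskisr off    - step with interrupts enabled
 *   aarch64 maskisr        - query the current setting
 */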
2465 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2466 {
2467         struct target *target = get_current_target(CMD_CTX);
2468         struct aarch64_common *aarch64 = target_to_aarch64(target);
2469
2470         static const Jim_Nvp nvp_maskisr_modes[] = {
2471                 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2472                 { .name = "on", .value = AARCH64_ISRMASK_ON },
2473                 { .name = NULL, .value = -1 },
2474         };
2475         const Jim_Nvp *n;
2476
2477         if (CMD_ARGC > 0) {
2478                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2479                 if (n->name == NULL) {
2480                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2481                         return ERROR_COMMAND_SYNTAX_ERROR;
2482                 }
2483
2484                 aarch64->isrmasking_mode = n->value;
2485         }
2486
2487         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2488         command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2489
2490         return ERROR_OK;
2491 }
2492
2493 static const struct command_registration aarch64_exec_command_handlers[] = {
2494         {
2495                 .name = "cache_info",
2496                 .handler = aarch64_handle_cache_info_command,
2497                 .mode = COMMAND_EXEC,
2498                 .help = "display information about target caches",
2499                 .usage = "",
2500         },
2501         {
2502                 .name = "dbginit",
2503                 .handler = aarch64_handle_dbginit_command,
2504                 .mode = COMMAND_EXEC,
2505                 .help = "Initialize core debug",
2506                 .usage = "",
2507         },
2508         {       .name = "smp_off",
2509                 .handler = aarch64_handle_smp_off_command,
2510                 .mode = COMMAND_EXEC,
2511                 .help = "Stop smp handling",
2512                 .usage = "",
2513         },
2514         {
2515                 .name = "smp_on",
2516                 .handler = aarch64_handle_smp_on_command,
2517                 .mode = COMMAND_EXEC,
2518                 .help = "Restart smp handling",
2519                 .usage = "",
2520         },
2521         {
2522                 .name = "maskisr",
2523                 .handler = aarch64_mask_interrupts_command,
2524                 .mode = COMMAND_ANY,
2525                 .help = "mask aarch64 interrupts during single-step",
2526                 .usage = "['on'|'off']",
2527         },
2528
2529         COMMAND_REGISTRATION_DONE
2530 };
2531 static const struct command_registration aarch64_command_handlers[] = {
2532         {
2533                 .chain = armv8_command_handlers,
2534         },
2535         {
2536                 .name = "aarch64",
2537                 .mode = COMMAND_ANY,
2538                 .help = "Aarch64 command group",
2539                 .usage = "",
2540                 .chain = aarch64_exec_command_handlers,
2541         },
2542         COMMAND_REGISTRATION_DONE
2543 };
2544
2545 struct target_type aarch64_target = {
2546         .name = "aarch64",
2547
2548         .poll = aarch64_poll,
2549         .arch_state = armv8_arch_state,
2550
2551         .halt = aarch64_halt,
2552         .resume = aarch64_resume,
2553         .step = aarch64_step,
2554
2555         .assert_reset = aarch64_assert_reset,
2556         .deassert_reset = aarch64_deassert_reset,
2557
2558         /* REVISIT allow exporting VFP3 registers ... */
2559         .get_gdb_reg_list = armv8_get_gdb_reg_list,
2560
2561         .read_memory = aarch64_read_memory,
2562         .write_memory = aarch64_write_memory,
2563
2564         .add_breakpoint = aarch64_add_breakpoint,
2565         .add_context_breakpoint = aarch64_add_context_breakpoint,
2566         .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2567         .remove_breakpoint = aarch64_remove_breakpoint,
2568         .add_watchpoint = NULL,
2569         .remove_watchpoint = NULL,
2570
2571         .commands = aarch64_command_handlers,
2572         .target_create = aarch64_target_create,
2573         .init_target = aarch64_init_target,
2574         .examine = aarch64_examine,
2575
2576         .read_phys_memory = aarch64_read_phys_memory,
2577         .write_phys_memory = aarch64_write_phys_memory,
2578         .mmu = aarch64_mmu,
2579         .virt2phys = aarch64_virt2phys,
2580 };