aarch64: support for aarch32 ARM_MODE_SYS
src/target/aarch64.c
1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include <helper/time_support.h>
33
34 enum restart_mode {
35         RESTART_LAZY,
36         RESTART_SYNC,
37 };
38
39 enum halt_mode {
40         HALT_LAZY,
41         HALT_SYNC,
42 };
43
44 struct aarch64_private_config {
45         struct adiv5_private_config adiv5_config;
46         struct arm_cti *cti;
47 };
48
49 static int aarch64_poll(struct target *target);
50 static int aarch64_debug_entry(struct target *target);
51 static int aarch64_restore_context(struct target *target, bool bpwp);
52 static int aarch64_set_breakpoint(struct target *target,
53         struct breakpoint *breakpoint, uint8_t matchmode);
54 static int aarch64_set_context_breakpoint(struct target *target,
55         struct breakpoint *breakpoint, uint8_t matchmode);
56 static int aarch64_set_hybrid_breakpoint(struct target *target,
57         struct breakpoint *breakpoint);
58 static int aarch64_unset_breakpoint(struct target *target,
59         struct breakpoint *breakpoint);
60 static int aarch64_mmu(struct target *target, int *enabled);
61 static int aarch64_virt2phys(struct target *target,
62         target_addr_t virt, target_addr_t *phys);
63 static int aarch64_read_cpu_memory(struct target *target,
64         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
65
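/* iterate over all members of an SMP target list, starting at 'head' */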
66 #define foreach_smp_target(pos, head) \
67         for (pos = head; (pos != NULL); pos = pos->next)
68
69 static int aarch64_restore_system_control_reg(struct target *target)
70 {
71         enum arm_mode target_mode = ARM_MODE_ANY;
72         int retval = ERROR_OK;
73         uint32_t instr;
74
75         struct aarch64_common *aarch64 = target_to_aarch64(target);
76         struct armv8_common *armv8 = target_to_armv8(target);
77
78         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
79                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
80                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81
82                 switch (armv8->arm.core_mode) {
83                 case ARMV8_64_EL0T:
84                         target_mode = ARMV8_64_EL1H;
85                         /* fall through */
86                 case ARMV8_64_EL1T:
87                 case ARMV8_64_EL1H:
88                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
89                         break;
90                 case ARMV8_64_EL2T:
91                 case ARMV8_64_EL2H:
92                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
93                         break;
94                 case ARMV8_64_EL3H:
95                 case ARMV8_64_EL3T:
96                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
97                         break;
98
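                /* in AArch32 modes (including SYS) the system control register
                 * is written through cp15: MCR p15, 0, r0, c1, c0, 0 */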
99                 case ARM_MODE_SVC:
100                 case ARM_MODE_ABT:
101                 case ARM_MODE_FIQ:
102                 case ARM_MODE_IRQ:
103                 case ARM_MODE_SYS:
104                         instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
105                         break;
106
107                 default:
108                         LOG_INFO("cannot restore system control register in this mode");
109                         return ERROR_FAIL;
110                 }
111
112                 if (target_mode != ARM_MODE_ANY)
113                         armv8_dpm_modeswitch(&armv8->dpm, target_mode);
114
115                 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
116                 if (retval != ERROR_OK)
117                         return retval;
118
119                 if (target_mode != ARM_MODE_ANY)
120                         armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
121         }
122
123         return retval;
124 }
125
126 /*  modify system_control_reg in order to enable or disable the MMU for:
127  *  - virt2phys address conversion
128  *  - reading or writing memory at physical or virtual addresses */
129 static int aarch64_mmu_modify(struct target *target, int enable)
130 {
131         struct aarch64_common *aarch64 = target_to_aarch64(target);
132         struct armv8_common *armv8 = &aarch64->armv8_common;
133         int retval = ERROR_OK;
134         uint32_t instr = 0;
135
136         if (enable) {
137                 /*      the MMU can only be enabled if it was enabled when the target stopped */
138                 if (!(aarch64->system_control_reg & 0x1U)) {
139                         LOG_ERROR("trying to enable the MMU on a target stopped with the MMU disabled");
140                         return ERROR_FAIL;
141                 }
142                 if (!(aarch64->system_control_reg_curr & 0x1U))
143                         aarch64->system_control_reg_curr |= 0x1U;
144         } else {
145                 if (aarch64->system_control_reg_curr & 0x4U) {
146                         /*  data cache is active */
147                         aarch64->system_control_reg_curr &= ~0x4U;
148                         /* call the armv8 cache helper to flush the data cache */
149                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
150                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
151                 }
152                 if ((aarch64->system_control_reg_curr & 0x1U)) {
153                         aarch64->system_control_reg_curr &= ~0x1U;
154                 }
155         }
156
157         switch (armv8->arm.core_mode) {
158         case ARMV8_64_EL0T:
159         case ARMV8_64_EL1T:
160         case ARMV8_64_EL1H:
161                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
162                 break;
163         case ARMV8_64_EL2T:
164         case ARMV8_64_EL2H:
165                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
166                 break;
167         case ARMV8_64_EL3H:
168         case ARMV8_64_EL3T:
169                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
170                 break;
171
172         case ARM_MODE_SVC:
173         case ARM_MODE_ABT:
174         case ARM_MODE_FIQ:
175         case ARM_MODE_IRQ:
176         case ARM_MODE_SYS:
177                 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
178                 break;
179
180         default:
181                 LOG_DEBUG("unknown core mode 0x%" PRIx32, armv8->arm.core_mode);
182                 return ERROR_FAIL;
183         }
184
185         retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
186                                 aarch64->system_control_reg_curr);
187         return retval;
188 }
189
190 /*
191  * Basic debug access, very low level; assumes state is saved
192  */
193 static int aarch64_init_debug_access(struct target *target)
194 {
195         struct armv8_common *armv8 = target_to_armv8(target);
196         int retval;
197         uint32_t dummy;
198
199         LOG_DEBUG("%s", target_name(target));
200
201         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
202                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
203         if (retval != ERROR_OK) {
204                 LOG_DEBUG("Examine %s failed", "oslock");
205                 return retval;
206         }
207
208         /* Clear Sticky Power Down status Bit in PRSR to enable access to
209            the registers in the Core Power Domain */
210         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
211                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
212         if (retval != ERROR_OK)
213                 return retval;
214
215         /*
216          * Static CTI configuration:
217          * Channel 0 -> trigger outputs HALT request to PE
218          * Channel 1 -> trigger outputs Resume request to PE
219          * Gate all channel trigger events from entering the CTM
220          */
221
222         /* Enable CTI */
223         retval = arm_cti_enable(armv8->cti, true);
224         /* By default, gate all channel events to and from the CTM */
225         if (retval == ERROR_OK)
226                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
227         /* output halt requests to PE on channel 0 event */
228         if (retval == ERROR_OK)
229                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
230         /* output restart requests to PE on channel 1 event */
231         if (retval == ERROR_OK)
232                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
233         if (retval != ERROR_OK)
234                 return retval;
235
236         /* Resync breakpoint registers */
237
238         return ERROR_OK;
239 }
240
241 /* Write to memory mapped registers directly with no cache or mmu handling */
242 static int aarch64_dap_write_memap_register_u32(struct target *target,
243         uint32_t address,
244         uint32_t value)
245 {
246         int retval;
247         struct armv8_common *armv8 = target_to_armv8(target);
248
249         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
250
251         return retval;
252 }
253
254 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
255 {
256         struct arm_dpm *dpm = &a8->armv8_common.dpm;
257         int retval;
258
259         dpm->arm = &a8->armv8_common.arm;
260         dpm->didr = debug;
261
262         retval = armv8_dpm_setup(dpm);
263         if (retval == ERROR_OK)
264                 retval = armv8_dpm_initialize(dpm);
265
266         return retval;
267 }
268
269 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
270 {
271         struct armv8_common *armv8 = target_to_armv8(target);
272         return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
273 }
274
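/*
 * Read EDPRSR once; report in *p_result whether the bits selected by 'mask'
 * match 'val', and optionally hand the raw PRSR value back in *p_prsr.
 */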
275 static int aarch64_check_state_one(struct target *target,
276                 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
277 {
278         struct armv8_common *armv8 = target_to_armv8(target);
279         uint32_t prsr;
280         int retval;
281
282         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
283                         armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
284         if (retval != ERROR_OK)
285                 return retval;
286
287         if (p_prsr)
288                 *p_prsr = prsr;
289
290         if (p_result)
291                 *p_result = (prsr & mask) == (val & mask);
292
293         return ERROR_OK;
294 }
295
296 static int aarch64_wait_halt_one(struct target *target)
297 {
298         int retval = ERROR_OK;
299         uint32_t prsr;
300
301         int64_t then = timeval_ms();
302         for (;;) {
303                 int halted;
304
305                 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
306                 if (retval != ERROR_OK || halted)
307                         break;
308
309                 if (timeval_ms() > then + 1000) {
310                         retval = ERROR_TARGET_TIMEOUT;
311                         LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
312                         break;
313                 }
314         }
315         return retval;
316 }
317
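/*
 * Prepare all running targets of an SMP group for halting: ungate CTI
 * channel 0 and enable halting debug mode on each of them. Optionally skip
 * the calling target and report a suitable target to halt first in *p_first.
 */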
318 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
319 {
320         int retval = ERROR_OK;
321         struct target_list *head = target->head;
322         struct target *first = NULL;
323
324         LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
325
326         while (head != NULL) {
327                 struct target *curr = head->target;
328                 struct armv8_common *armv8 = target_to_armv8(curr);
329                 head = head->next;
330
331                 if (exc_target && curr == target)
332                         continue;
333                 if (!target_was_examined(curr))
334                         continue;
335                 if (curr->state != TARGET_RUNNING)
336                         continue;
337
338                 /* HACK: mark this target as prepared for halting */
339                 curr->debug_reason = DBG_REASON_DBGRQ;
340
341                 /* open the gate for channel 0 to let HALT requests pass to the CTM */
342                 retval = arm_cti_ungate_channel(armv8->cti, 0);
343                 if (retval == ERROR_OK)
344                         retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
345                 if (retval != ERROR_OK)
346                         break;
347
348                 LOG_DEBUG("target %s prepared", target_name(curr));
349
350                 if (first == NULL)
351                         first = curr;
352         }
353
354         if (p_first) {
355                 if (exc_target && first)
356                         *p_first = first;
357                 else
358                         *p_first = target;
359         }
360
361         return retval;
362 }
363
364 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
365 {
366         int retval = ERROR_OK;
367         struct armv8_common *armv8 = target_to_armv8(target);
368
369         LOG_DEBUG("%s", target_name(target));
370
371         /* allow Halting Debug Mode */
372         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
373         if (retval != ERROR_OK)
374                 return retval;
375
376         /* trigger an event on channel 0, this outputs a halt request to the PE */
377         retval = arm_cti_pulse_channel(armv8->cti, 0);
378         if (retval != ERROR_OK)
379                 return retval;
380
381         if (mode == HALT_SYNC) {
382                 retval = aarch64_wait_halt_one(target);
383                 if (retval != ERROR_OK) {
384                         if (retval == ERROR_TARGET_TIMEOUT)
385                                 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
386                         return retval;
387                 }
388         }
389
390         return ERROR_OK;
391 }
392
393 static int aarch64_halt_smp(struct target *target, bool exc_target)
394 {
395         struct target *next = target;
396         int retval;
397
398         /* prepare halt on all PEs of the group */
399         retval = aarch64_prepare_halt_smp(target, exc_target, &next);
400
401         if (exc_target && next == target)
402                 return retval;
403
404         /* halt the target PE */
405         if (retval == ERROR_OK)
406                 retval = aarch64_halt_one(next, HALT_LAZY);
407
408         if (retval != ERROR_OK)
409                 return retval;
410
411         /* wait for all PEs to halt */
412         int64_t then = timeval_ms();
413         for (;;) {
414                 bool all_halted = true;
415                 struct target_list *head;
416                 struct target *curr;
417
418                 foreach_smp_target(head, target->head) {
419                         int halted;
420
421                         curr = head->target;
422
423                         if (!target_was_examined(curr))
424                                 continue;
425
426                         retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
427                         if (retval != ERROR_OK || !halted) {
428                                 all_halted = false;
429                                 break;
430                         }
431                 }
432
433                 if (all_halted)
434                         break;
435
436                 if (timeval_ms() > then + 1000) {
437                         retval = ERROR_TARGET_TIMEOUT;
438                         break;
439                 }
440
441                 /*
442                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
443                  * and it looks like the CTI's are not connected by a common
444                  * trigger matrix. It seems that we need to halt one core in each
445                  * cluster explicitly. So if we find that a core has not halted
446                  * yet, we trigger an explicit halt for the second cluster.
447                  */
448                 retval = aarch64_halt_one(curr, HALT_LAZY);
449                 if (retval != ERROR_OK)
450                         break;
451         }
452
453         return retval;
454 }
455
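/*
 * If needed, halt the remaining members of the SMP group, then poll them;
 * the target that serves the GDB connection is polled last, after all
 * others have been updated.
 */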
456 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
457 {
458         struct target *gdb_target = NULL;
459         struct target_list *head;
460         struct target *curr;
461
462         if (debug_reason == DBG_REASON_NOTHALTED) {
463                 LOG_DEBUG("Halting remaining targets in SMP group");
464                 aarch64_halt_smp(target, true);
465         }
466
467         /* poll all targets in the group, but skip the target that serves GDB */
468         foreach_smp_target(head, target->head) {
469                 curr = head->target;
470                 /* skip calling context */
471                 if (curr == target)
472                         continue;
473                 if (!target_was_examined(curr))
474                         continue;
475                 /* skip targets that were already halted */
476                 if (curr->state == TARGET_HALTED)
477                         continue;
478                 /* remember the gdb_service->target */
479                 if (curr->gdb_service != NULL)
480                         gdb_target = curr->gdb_service->target;
481                 /* skip it */
482                 if (curr == gdb_target)
483                         continue;
484
485                 /* avoid recursion in aarch64_poll() */
486                 curr->smp = 0;
487                 aarch64_poll(curr);
488                 curr->smp = 1;
489         }
490
491         /* after all targets were updated, poll the gdb serving target */
492         if (gdb_target != NULL && gdb_target != target)
493                 aarch64_poll(gdb_target);
494
495         return ERROR_OK;
496 }
497
498 /*
499  * AArch64 run control
500  */
501
502 static int aarch64_poll(struct target *target)
503 {
504         enum target_state prev_target_state;
505         int retval = ERROR_OK;
506         int halted;
507
508         retval = aarch64_check_state_one(target,
509                                 PRSR_HALT, PRSR_HALT, &halted, NULL);
510         if (retval != ERROR_OK)
511                 return retval;
512
513         if (halted) {
514                 prev_target_state = target->state;
515                 if (prev_target_state != TARGET_HALTED) {
516                         enum target_debug_reason debug_reason = target->debug_reason;
517
518                         /* We have a halting debug event */
519                         target->state = TARGET_HALTED;
520                         LOG_DEBUG("Target %s halted", target_name(target));
521                         retval = aarch64_debug_entry(target);
522                         if (retval != ERROR_OK)
523                                 return retval;
524
525                         if (target->smp)
526                                 update_halt_gdb(target, debug_reason);
527
528                         if (arm_semihosting(target, &retval) != 0)
529                                 return retval;
530
531                         switch (prev_target_state) {
532                         case TARGET_RUNNING:
533                         case TARGET_UNKNOWN:
534                         case TARGET_RESET:
535                                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
536                                 break;
537                         case TARGET_DEBUG_RUNNING:
538                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
539                                 break;
540                         default:
541                                 break;
542                         }
543                 }
544         } else
545                 target->state = TARGET_RUNNING;
546
547         return retval;
548 }
549
550 static int aarch64_halt(struct target *target)
551 {
552         struct armv8_common *armv8 = target_to_armv8(target);
553         armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
554
555         if (target->smp)
556                 return aarch64_halt_smp(target, false);
557
558         return aarch64_halt_one(target, HALT_SYNC);
559 }
560
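/*
 * Restore a single target before restart: write back the (possibly adjusted)
 * PC for the current core state, restore the system control register and
 * flush all dirty registers to the PE.
 */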
561 static int aarch64_restore_one(struct target *target, int current,
562         uint64_t *address, int handle_breakpoints, int debug_execution)
563 {
564         struct armv8_common *armv8 = target_to_armv8(target);
565         struct arm *arm = &armv8->arm;
566         int retval;
567         uint64_t resume_pc;
568
569         LOG_DEBUG("%s", target_name(target));
570
571         if (!debug_execution)
572                 target_free_all_working_areas(target);
573
574         /* current = 1: continue on current pc, otherwise continue at <address> */
575         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
576         if (!current)
577                 resume_pc = *address;
578         else
579                 *address = resume_pc;
580
581         /* Make sure that the ARMv7 GDB Thumb fixup does not
582          * kill the return address
583          */
584         switch (arm->core_state) {
585                 case ARM_STATE_ARM:
586                         resume_pc &= 0xFFFFFFFC;
587                         break;
588                 case ARM_STATE_AARCH64:
589                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
590                         break;
591                 case ARM_STATE_THUMB:
592                 case ARM_STATE_THUMB_EE:
593                         /* When the return address is loaded into PC
594                          * bit 0 must be 1 to stay in Thumb state
595                          */
596                         resume_pc |= 0x1;
597                         break;
598                 case ARM_STATE_JAZELLE:
599                         LOG_ERROR("How do I resume into Jazelle state??");
600                         return ERROR_FAIL;
601         }
602         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
603         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
604         arm->pc->dirty = 1;
605         arm->pc->valid = 1;
606
607         /* call this now, before restoring context, because it uses cpu
608          * register r0 to restore the system control register */
609         retval = aarch64_restore_system_control_reg(target);
610         if (retval == ERROR_OK)
611                 retval = aarch64_restore_context(target, handle_breakpoints);
612
613         return retval;
614 }
615
616 /**
617  * Prepare a single target for restart: check that DSCR allows leaving
618  * debug state, acknowledge the pending CTI halt event and set up the CTI
619  * gates so that restart requests reach the PE while halt requests do not.
620  */
621 static int aarch64_prepare_restart_one(struct target *target)
622 {
623         struct armv8_common *armv8 = target_to_armv8(target);
624         int retval;
625         uint32_t dscr;
626         uint32_t tmp;
627
628         LOG_DEBUG("%s", target_name(target));
629
630         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
631                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
632         if (retval != ERROR_OK)
633                 return retval;
634
635         if ((dscr & DSCR_ITE) == 0)
636                 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
637         if ((dscr & DSCR_ERR) != 0)
638                 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
639
640         /* acknowledge a pending CTI halt event */
641         retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
642         /*
643          * open the CTI gate for channel 1 so that the restart events
644          * get passed along to all PEs. Also close gate for channel 0
645          * to isolate the PE from halt events.
646          */
647         if (retval == ERROR_OK)
648                 retval = arm_cti_ungate_channel(armv8->cti, 1);
649         if (retval == ERROR_OK)
650                 retval = arm_cti_gate_channel(armv8->cti, 0);
651
652         /* make sure that DSCR.HDE is set */
653         if (retval == ERROR_OK) {
654                 dscr |= DSCR_HDE;
655                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
656                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
657         }
658
659         if (retval == ERROR_OK) {
660                 /* clear sticky bits in PRSR, SDR is now 0 */
661                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
662                                 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
663         }
664
665         return retval;
666 }
667
668 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
669 {
670         struct armv8_common *armv8 = target_to_armv8(target);
671         int retval;
672
673         LOG_DEBUG("%s", target_name(target));
674
675         /* trigger an event on channel 1; this generates a restart request to the PE */
676         retval = arm_cti_pulse_channel(armv8->cti, 1);
677         if (retval != ERROR_OK)
678                 return retval;
679
680         if (mode == RESTART_SYNC) {
681                 int64_t then = timeval_ms();
682                 for (;;) {
683                         int resumed;
684                         /*
685                          * if PRSR.SDR is set now, the target did restart, even
686                          * if it's now already halted again (e.g. due to breakpoint)
687                          */
688                         retval = aarch64_check_state_one(target,
689                                                 PRSR_SDR, PRSR_SDR, &resumed, NULL);
690                         if (retval != ERROR_OK || resumed)
691                                 break;
692
693                         if (timeval_ms() > then + 1000) {
694                                 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
695                                 retval = ERROR_TARGET_TIMEOUT;
696                                 break;
697                         }
698                 }
699         }
700
701         if (retval != ERROR_OK)
702                 return retval;
703
704         target->debug_reason = DBG_REASON_NOTHALTED;
705         target->state = TARGET_RUNNING;
706
707         return ERROR_OK;
708 }
709
710 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
711 {
712         int retval;
713
714         LOG_DEBUG("%s", target_name(target));
715
716         retval = aarch64_prepare_restart_one(target);
717         if (retval == ERROR_OK)
718                 retval = aarch64_do_restart_one(target, mode);
719
720         return retval;
721 }
722
723 /*
724  * prepare all but the current target for restart
725  */
726 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
727 {
728         int retval = ERROR_OK;
729         struct target_list *head;
730         struct target *first = NULL;
731         uint64_t address;
732
733         foreach_smp_target(head, target->head) {
734                 struct target *curr = head->target;
735
736                 /* skip calling target */
737                 if (curr == target)
738                         continue;
739                 if (!target_was_examined(curr))
740                         continue;
741                 if (curr->state != TARGET_HALTED)
742                         continue;
743
744                 /*  resume at current address, not in step mode */
745                 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
746                 if (retval == ERROR_OK)
747                         retval = aarch64_prepare_restart_one(curr);
748                 if (retval != ERROR_OK) {
749                         LOG_ERROR("failed to restore target %s", target_name(curr));
750                         break;
751                 }
752                 /* remember the first valid target in the group */
753                 if (first == NULL)
754                         first = curr;
755         }
756
757         if (p_first)
758                 *p_first = first;
759
760         return retval;
761 }
762
763
764 static int aarch64_step_restart_smp(struct target *target)
765 {
766         int retval = ERROR_OK;
767         struct target_list *head;
768         struct target *first = NULL;
769
770         LOG_DEBUG("%s", target_name(target));
771
772         retval = aarch64_prep_restart_smp(target, 0, &first);
773         if (retval != ERROR_OK)
774                 return retval;
775
776         if (first != NULL)
777                 retval = aarch64_do_restart_one(first, RESTART_LAZY);
778         if (retval != ERROR_OK) {
779                 LOG_DEBUG("error restarting target %s", target_name(first));
780                 return retval;
781         }
782
783         int64_t then = timeval_ms();
784         for (;;) {
785                 struct target *curr = target;
786                 bool all_resumed = true;
787
788                 foreach_smp_target(head, target->head) {
789                         uint32_t prsr;
790                         int resumed;
791
792                         curr = head->target;
793
794                         if (curr == target)
795                                 continue;
796
797                         if (!target_was_examined(curr))
798                                 continue;
799
800                         retval = aarch64_check_state_one(curr,
801                                         PRSR_SDR, PRSR_SDR, &resumed, &prsr);
802                         if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
803                                 all_resumed = false;
804                                 break;
805                         }
806
807                         if (curr->state != TARGET_RUNNING) {
808                                 curr->state = TARGET_RUNNING;
809                                 curr->debug_reason = DBG_REASON_NOTHALTED;
810                                 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
811                         }
812                 }
813
814                 if (all_resumed)
815                         break;
816
817                 if (timeval_ms() > then + 1000) {
818                         LOG_ERROR("%s: timeout waiting for target resume", __func__);
819                         retval = ERROR_TARGET_TIMEOUT;
820                         break;
821                 }
822                 /*
823                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
824                  * and it looks like the CTI's are not connected by a common
825                  * trigger matrix. It seems that we need to restart one core in each
826                  * cluster explicitly. So if we find that a core has not resumed
827                  * yet, we trigger an explicit resume for the second cluster.
828                  */
829                 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
830                 if (retval != ERROR_OK)
831                         break;
832         }
833
834         return retval;
835 }
836
837 static int aarch64_resume(struct target *target, int current,
838         target_addr_t address, int handle_breakpoints, int debug_execution)
839 {
840         int retval = 0;
841         uint64_t addr = address;
842
843         struct armv8_common *armv8 = target_to_armv8(target);
844         armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
845
846         if (target->state != TARGET_HALTED)
847                 return ERROR_TARGET_NOT_HALTED;
848
849         /*
850          * If this target is part of an SMP group, prepare the other
851          * targets for resuming. This involves restoring the complete
852          * target register context and setting up CTI gates to accept
853          * resume events from the trigger matrix.
854          */
855         if (target->smp) {
856                 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
857                 if (retval != ERROR_OK)
858                         return retval;
859         }
860
861         /* all targets prepared, restore and restart the current target */
862         retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
863                                  debug_execution);
864         if (retval == ERROR_OK)
865                 retval = aarch64_restart_one(target, RESTART_SYNC);
866         if (retval != ERROR_OK)
867                 return retval;
868
869         if (target->smp) {
870                 int64_t then = timeval_ms();
871                 for (;;) {
872                         struct target *curr = target;
873                         struct target_list *head;
874                         bool all_resumed = true;
875
876                         foreach_smp_target(head, target->head) {
877                                 uint32_t prsr;
878                                 int resumed;
879
880                                 curr = head->target;
881                                 if (curr == target)
882                                         continue;
883                                 if (!target_was_examined(curr))
884                                         continue;
885
886                                 retval = aarch64_check_state_one(curr,
887                                                 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
888                                 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
889                                         all_resumed = false;
890                                         break;
891                                 }
892
893                                 if (curr->state != TARGET_RUNNING) {
894                                         curr->state = TARGET_RUNNING;
895                                         curr->debug_reason = DBG_REASON_NOTHALTED;
896                                         target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
897                                 }
898                         }
899
900                         if (all_resumed)
901                                 break;
902
903                         if (timeval_ms() > then + 1000) {
904                                 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
905                                 retval = ERROR_TARGET_TIMEOUT;
906                                 break;
907                         }
908
909                         /*
910                          * HACK: on Hi6220 there are 8 cores organized in 2 clusters
911                          * and it looks like the CTI's are not connected by a common
912                          * trigger matrix. It seems that we need to restart one core in each
913                          * cluster explicitly. So if we find that a core has not resumed
914                          * yet, we trigger an explicit resume for the second cluster.
915                          */
916                         retval = aarch64_do_restart_one(curr, RESTART_LAZY);
917                         if (retval != ERROR_OK)
918                                 break;
919                 }
920         }
921
922         if (retval != ERROR_OK)
923                 return retval;
924
925         target->debug_reason = DBG_REASON_NOTHALTED;
926
927         if (!debug_execution) {
928                 target->state = TARGET_RUNNING;
929                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
930                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
931         } else {
932                 target->state = TARGET_DEBUG_RUNNING;
933                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
934                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
935         }
936
937         return ERROR_OK;
938 }
939
940 static int aarch64_debug_entry(struct target *target)
941 {
942         int retval = ERROR_OK;
943         struct armv8_common *armv8 = target_to_armv8(target);
944         struct arm_dpm *dpm = &armv8->dpm;
945         enum arm_state core_state;
946         uint32_t dscr;
947
948         /* make sure to clear all sticky errors */
949         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
950                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
951         if (retval == ERROR_OK)
952                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
953                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
954         if (retval == ERROR_OK)
955                 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
956
957         if (retval != ERROR_OK)
958                 return retval;
959
960         LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
961
962         dpm->dscr = dscr;
963         core_state = armv8_dpm_get_core_state(dpm);
964         armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
965         armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
966
967         /* close the CTI gate for all events */
968         if (retval == ERROR_OK)
969                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
970         /* discard async exceptions */
971         if (retval == ERROR_OK)
972                 retval = dpm->instr_cpsr_sync(dpm);
973         if (retval != ERROR_OK)
974                 return retval;
975
976         /* Examine debug reason */
977         armv8_dpm_report_dscr(dpm, dscr);
978
979         /* save the fault address reported for the watchpoint (WFAR, read as two 32-bit halves) */
980         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
981                 uint32_t tmp;
982                 uint64_t wfar = 0;
983
984                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
985                                 armv8->debug_base + CPUV8_DBG_WFAR1,
986                                 &tmp);
987                 if (retval != ERROR_OK)
988                         return retval;
989                 wfar = tmp;
990                 wfar = (wfar << 32);
991                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
992                                 armv8->debug_base + CPUV8_DBG_WFAR0,
993                                 &tmp);
994                 if (retval != ERROR_OK)
995                         return retval;
996                 wfar |= tmp;
997                 armv8_dpm_report_wfar(&armv8->dpm, wfar);
998         }
999
1000         retval = armv8_dpm_read_current_registers(&armv8->dpm);
1001
1002         if (retval == ERROR_OK && armv8->post_debug_entry)
1003                 retval = armv8->post_debug_entry(target);
1004
1005         return retval;
1006 }
1007
1008 static int aarch64_post_debug_entry(struct target *target)
1009 {
1010         struct aarch64_common *aarch64 = target_to_aarch64(target);
1011         struct armv8_common *armv8 = &aarch64->armv8_common;
1012         int retval;
1013         enum arm_mode target_mode = ARM_MODE_ANY;
1014         uint32_t instr;
1015
1016         switch (armv8->arm.core_mode) {
1017         case ARMV8_64_EL0T:
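                /* SCTLR_EL1 cannot be accessed from EL0, switch to EL1h first */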
1018                 target_mode = ARMV8_64_EL1H;
1019                 /* fall through */
1020         case ARMV8_64_EL1T:
1021         case ARMV8_64_EL1H:
1022                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1023                 break;
1024         case ARMV8_64_EL2T:
1025         case ARMV8_64_EL2H:
1026                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1027                 break;
1028         case ARMV8_64_EL3H:
1029         case ARMV8_64_EL3T:
1030                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1031                 break;
1032
1033         case ARM_MODE_SVC:
1034         case ARM_MODE_ABT:
1035         case ARM_MODE_FIQ:
1036         case ARM_MODE_IRQ:
1037         case ARM_MODE_SYS:
1038                 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1039                 break;
1040
1041         default:
1042                 LOG_INFO("cannot read system control register in this mode");
1043                 return ERROR_FAIL;
1044         }
1045
1046         if (target_mode != ARM_MODE_ANY)
1047                 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1048
1049         retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1050         if (retval != ERROR_OK)
1051                 return retval;
1052
1053         if (target_mode != ARM_MODE_ANY)
1054                 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1055
1056         LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1057         aarch64->system_control_reg_curr = aarch64->system_control_reg;
1058
1059         if (armv8->armv8_mmu.armv8_cache.info == -1) {
1060                 armv8_identify_cache(armv8);
1061                 armv8_read_mpidr(armv8);
1062         }
1063
1064         armv8->armv8_mmu.mmu_enabled =
1065                         (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1066         armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1067                 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1068         armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1069                 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1070         return ERROR_OK;
1071 }
1072
1073 /*
1074  * single-step a target
1075  */
1076 static int aarch64_step(struct target *target, int current, target_addr_t address,
1077         int handle_breakpoints)
1078 {
1079         struct armv8_common *armv8 = target_to_armv8(target);
1080         struct aarch64_common *aarch64 = target_to_aarch64(target);
1081         int saved_retval = ERROR_OK;
1082         int retval;
1083         uint32_t edecr;
1084
1085         armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1086
1087         if (target->state != TARGET_HALTED) {
1088                 LOG_WARNING("target not halted");
1089                 return ERROR_TARGET_NOT_HALTED;
1090         }
1091
1092         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1093                         armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1094         /* make sure EDECR.SS is not set when restoring the register */
1095
1096         if (retval == ERROR_OK) {
1097                 edecr &= ~0x4;
1098                 /* set EDECR.SS to enter hardware step mode */
1099                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1100                                 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1101         }
1102         /* disable interrupts while stepping */
1103         if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1104                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1105         /* bail out if stepping setup has failed */
1106         if (retval != ERROR_OK)
1107                 return retval;
1108
1109         if (target->smp && (current == 1)) {
1110                 /*
1111                  * isolate current target so that it doesn't get resumed
1112                  * together with the others
1113                  */
1114                 retval = arm_cti_gate_channel(armv8->cti, 1);
1115                 /* resume all other targets in the group */
1116                 if (retval == ERROR_OK)
1117                         retval = aarch64_step_restart_smp(target);
1118                 if (retval != ERROR_OK) {
1119                         LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1120                         return retval;
1121                 }
1122                 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1123         }
1124
1125         /* all other targets running, restore and restart the current target */
1126         retval = aarch64_restore_one(target, current, &address, 0, 0);
1127         if (retval == ERROR_OK)
1128                 retval = aarch64_restart_one(target, RESTART_LAZY);
1129
1130         if (retval != ERROR_OK)
1131                 return retval;
1132
1133         LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1134         if (!handle_breakpoints)
1135                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1136
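        /* wait for the PE to halt again: PRSR.SDR and PRSR.HALT both set means
         * the core restarted, executed the step and re-entered debug state */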
1137         int64_t then = timeval_ms();
1138         for (;;) {
1139                 int stepped;
1140                 uint32_t prsr;
1141
1142                 retval = aarch64_check_state_one(target,
1143                                         PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1144                 if (retval != ERROR_OK || stepped)
1145                         break;
1146
1147                 if (timeval_ms() > then + 100) {
1148                         LOG_ERROR("timeout waiting for target %s halt after step",
1149                                         target_name(target));
1150                         retval = ERROR_TARGET_TIMEOUT;
1151                         break;
1152                 }
1153         }
1154
1155         /*
1156          * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1157          * causes a timeout. The core takes the step but doesn't complete it and so
1158          * debug state is never entered. However, you can manually halt the core
1159          * as an external debug event is also a WFI wakeup event.
1160          */
1161         if (retval == ERROR_TARGET_TIMEOUT)
1162                 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1163
1164         /* restore EDECR */
1165         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1166                         armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1167         if (retval != ERROR_OK)
1168                 return retval;
1169
1170         /* restore interrupts */
1171         if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1172                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1173                 if (retval != ERROR_OK)
1174                         return retval;
1175         }
1176
1177         if (saved_retval != ERROR_OK)
1178                 return saved_retval;
1179
1180         return aarch64_poll(target);
1181 }
1182
1183 static int aarch64_restore_context(struct target *target, bool bpwp)
1184 {
1185         struct armv8_common *armv8 = target_to_armv8(target);
1186         struct arm *arm = &armv8->arm;
1187
1188         int retval;
1189
1190         LOG_DEBUG("%s", target_name(target));
1191
1192         if (armv8->pre_restore_context)
1193                 armv8->pre_restore_context(target);
1194
1195         retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1196         if (retval == ERROR_OK) {
1197                 /* registers are now invalid */
1198                 register_cache_invalidate(arm->core_cache);
1199                 register_cache_invalidate(arm->core_cache->next);
1200         }
1201
1202         return retval;
1203 }
1204
1205 /*
1206  * AArch64 breakpoint and watchpoint functions
1207  */
1208
1209 /* Setup hardware Breakpoint Register Pair */
1210 static int aarch64_set_breakpoint(struct target *target,
1211         struct breakpoint *breakpoint, uint8_t matchmode)
1212 {
1213         int retval;
1214         int brp_i = 0;
1215         uint32_t control;
1216         uint8_t byte_addr_select = 0x0F;
1217         struct aarch64_common *aarch64 = target_to_aarch64(target);
1218         struct armv8_common *armv8 = &aarch64->armv8_common;
1219         struct aarch64_brp *brp_list = aarch64->brp_list;
1220
1221         if (breakpoint->set) {
1222                 LOG_WARNING("breakpoint already set");
1223                 return ERROR_OK;
1224         }
1225
1226         if (breakpoint->type == BKPT_HARD) {
1227                 int64_t bpt_value;
1228                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1229                         brp_i++;
1230                 if (brp_i >= aarch64->brp_num) {
1231                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1232                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1233                 }
1234                 breakpoint->set = brp_i + 1;
1235                 if (breakpoint->length == 2)
1236                         byte_addr_select = (3 << (breakpoint->address & 0x02));
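                /* assemble DBGBCR: breakpoint type in bits[23:20] (matchmode),
                 * HMC (bit 13), byte address select in bits[8:5], privilege
                 * control in bits[2:1] and the enable bit */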
1237                 control = ((matchmode & 0x7) << 20)
1238                         | (1 << 13)
1239                         | (byte_addr_select << 5)
1240                         | (3 << 1) | 1;
1241                 brp_list[brp_i].used = 1;
1242                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1243                 brp_list[brp_i].control = control;
1244                 bpt_value = brp_list[brp_i].value;
1245
1246                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1247                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1248                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1249                 if (retval != ERROR_OK)
1250                         return retval;
1251                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1252                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1253                                 (uint32_t)(bpt_value >> 32));
1254                 if (retval != ERROR_OK)
1255                         return retval;
1256
1257                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1258                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1259                                 brp_list[brp_i].control);
1260                 if (retval != ERROR_OK)
1261                         return retval;
1262                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1263                         brp_list[brp_i].control,
1264                         brp_list[brp_i].value);
1265
1266         } else if (breakpoint->type == BKPT_SOFT) {
1267                 uint8_t code[4];
1268
1269                 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1270                 retval = target_read_memory(target,
1271                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1272                                 breakpoint->length, 1,
1273                                 breakpoint->orig_instr);
1274                 if (retval != ERROR_OK)
1275                         return retval;
1276
1277                 armv8_cache_d_inner_flush_virt(armv8,
1278                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1279                                 breakpoint->length);
1280
1281                 retval = target_write_memory(target,
1282                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1283                                 breakpoint->length, 1, code);
1284                 if (retval != ERROR_OK)
1285                         return retval;
1286
1287                 armv8_cache_d_inner_flush_virt(armv8,
1288                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1289                                 breakpoint->length);
1290
1291                 armv8_cache_i_inner_inval_virt(armv8,
1292                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1293                                 breakpoint->length);
1294
1295                 breakpoint->set = 0x11; /* Any nice value but 0 */
1296         }
1297
1298         /* Ensure that halting debug mode is enabled */
1299         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1300         if (retval != ERROR_OK) {
1301                 LOG_DEBUG("Failed to set DSCR.HDE");
1302                 return retval;
1303         }
1304
1305         return ERROR_OK;
1306 }
1307
1308 static int aarch64_set_context_breakpoint(struct target *target,
1309         struct breakpoint *breakpoint, uint8_t matchmode)
1310 {
1311         int retval = ERROR_FAIL;
1312         int brp_i = 0;
1313         uint32_t control;
1314         uint8_t byte_addr_select = 0x0F;
1315         struct aarch64_common *aarch64 = target_to_aarch64(target);
1316         struct armv8_common *armv8 = &aarch64->armv8_common;
1317         struct aarch64_brp *brp_list = aarch64->brp_list;
1318
1319         if (breakpoint->set) {
1320                 LOG_WARNING("breakpoint already set");
1321                 return retval;
1322         }
1323         /* check available context BRPs */
1324         while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1325                 (brp_list[brp_i].type != BRP_CONTEXT)))
1326                 brp_i++;
1327
1328         if (brp_i >= aarch64->brp_num) {
1329                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1330                 return ERROR_FAIL;
1331         }
1332
1333         breakpoint->set = brp_i + 1;
1334         control = ((matchmode & 0x7) << 20)
1335                 | (1 << 13)
1336                 | (byte_addr_select << 5)
1337                 | (3 << 1) | 1;
1338         brp_list[brp_i].used = 1;
1339         brp_list[brp_i].value = (breakpoint->asid);
1340         brp_list[brp_i].control = control;
1341         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1342                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1343                         brp_list[brp_i].value);
1344         if (retval != ERROR_OK)
1345                 return retval;
1346         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1347                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1348                         brp_list[brp_i].control);
1349         if (retval != ERROR_OK)
1350                 return retval;
1351         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1352                 brp_list[brp_i].control,
1353                 brp_list[brp_i].value);
1354         return ERROR_OK;
1355
1356 }
1357
1358 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1359 {
1360         int retval = ERROR_FAIL;
1361         int brp_1 = 0;  /* holds the contextID pair */
1362         int brp_2 = 0;  /* holds the IVA pair */
1363         uint32_t control_CTX, control_IVA;
1364         uint8_t CTX_byte_addr_select = 0x0F;
1365         uint8_t IVA_byte_addr_select = 0x0F;
1366         uint8_t CTX_machmode = 0x03;
1367         uint8_t IVA_machmode = 0x01;
1368         struct aarch64_common *aarch64 = target_to_aarch64(target);
1369         struct armv8_common *armv8 = &aarch64->armv8_common;
1370         struct aarch64_brp *brp_list = aarch64->brp_list;
1371
1372         if (breakpoint->set) {
1373                 LOG_WARNING("breakpoint already set");
1374                 return retval;
1375         }
1376         /* check available context BRPs */
1377         while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1378                 (brp_list[brp_1].type != BRP_CONTEXT)))
1379                 brp_1++;
1380
1381         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1382         if (brp_1 >= aarch64->brp_num) {
1383                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1384                 return ERROR_FAIL;
1385         }
1386
1387         while ((brp_list[brp_2].used ||
1388                 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1389                 brp_2++;
1390
1391         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1392         if (brp_2 >= aarch64->brp_num) {
1393                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1394                 return ERROR_FAIL;
1395         }
1396
1397         breakpoint->set = brp_1 + 1;
1398         breakpoint->linked_BRP = brp_2;
1399         control_CTX = ((CTX_machmode & 0x7) << 20)
1400                 | (brp_2 << 16)
1401                 | (0 << 14)
1402                 | (CTX_byte_addr_select << 5)
1403                 | (3 << 1) | 1;
1404         brp_list[brp_1].used = 1;
1405         brp_list[brp_1].value = (breakpoint->asid);
1406         brp_list[brp_1].control = control_CTX;
1407         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1408                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1409                         brp_list[brp_1].value);
1410         if (retval != ERROR_OK)
1411                 return retval;
1412         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1413                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1414                         brp_list[brp_1].control);
1415         if (retval != ERROR_OK)
1416                 return retval;
1417
1418         control_IVA = ((IVA_machmode & 0x7) << 20)
1419                 | (brp_1 << 16)
1420                 | (1 << 13)
1421                 | (IVA_byte_addr_select << 5)
1422                 | (3 << 1) | 1;
1423         brp_list[brp_2].used = 1;
1424         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1425         brp_list[brp_2].control = control_IVA;
1426         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1427                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1428                         brp_list[brp_2].value & 0xFFFFFFFF);
1429         if (retval != ERROR_OK)
1430                 return retval;
1431         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1432                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1433                         brp_list[brp_2].value >> 32);
1434         if (retval != ERROR_OK)
1435                 return retval;
1436         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1437                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1438                         brp_list[brp_2].control);
1439         if (retval != ERROR_OK)
1440                 return retval;
1441
1442         return ERROR_OK;
1443 }
1444
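/*
 * Remove a breakpoint. Hardware hybrid breakpoints occupy two linked BRPs,
 * so both pairs are disabled; plain hardware breakpoints clear a single
 * pair. Software breakpoints restore the original instruction and keep the
 * data and instruction caches coherent around the patched address.
 */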
1445 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1446 {
1447         int retval;
1448         struct aarch64_common *aarch64 = target_to_aarch64(target);
1449         struct armv8_common *armv8 = &aarch64->armv8_common;
1450         struct aarch64_brp *brp_list = aarch64->brp_list;
1451
1452         if (!breakpoint->set) {
1453                 LOG_WARNING("breakpoint not set");
1454                 return ERROR_OK;
1455         }
1456
1457         if (breakpoint->type == BKPT_HARD) {
1458                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1459                         int brp_i = breakpoint->set - 1;
1460                         int brp_j = breakpoint->linked_BRP;
1461                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1462                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1463                                 return ERROR_OK;
1464                         }
1465                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1466                                 brp_list[brp_i].control, brp_list[brp_i].value);
1467                         brp_list[brp_i].used = 0;
1468                         brp_list[brp_i].value = 0;
1469                         brp_list[brp_i].control = 0;
1470                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1471                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1472                                         brp_list[brp_i].control);
1473                         if (retval != ERROR_OK)
1474                                 return retval;
1475                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1476                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1477                                         (uint32_t)brp_list[brp_i].value);
1478                         if (retval != ERROR_OK)
1479                                 return retval;
1480                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1481                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1482                                         (uint32_t)brp_list[brp_i].value);
1483                         if (retval != ERROR_OK)
1484                                 return retval;
1485                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1486                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1487                                 return ERROR_OK;
1488                         }
1489                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1490                                 brp_list[brp_j].control, brp_list[brp_j].value);
1491                         brp_list[brp_j].used = 0;
1492                         brp_list[brp_j].value = 0;
1493                         brp_list[brp_j].control = 0;
1494                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1495                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1496                                         brp_list[brp_j].control);
1497                         if (retval != ERROR_OK)
1498                                 return retval;
1499                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1500                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1501                                         (uint32_t)brp_list[brp_j].value);
1502                         if (retval != ERROR_OK)
1503                                 return retval;
1504                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1505                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1506                                         (uint32_t)brp_list[brp_j].value);
1507                         if (retval != ERROR_OK)
1508                                 return retval;
1509
1510                         breakpoint->linked_BRP = 0;
1511                         breakpoint->set = 0;
1512                         return ERROR_OK;
1513
1514                 } else {
1515                         int brp_i = breakpoint->set - 1;
1516                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1517                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1518                                 return ERROR_OK;
1519                         }
1520                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1521                                 brp_list[brp_i].control, brp_list[brp_i].value);
1522                         brp_list[brp_i].used = 0;
1523                         brp_list[brp_i].value = 0;
1524                         brp_list[brp_i].control = 0;
1525                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1526                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1527                                         brp_list[brp_i].control);
1528                         if (retval != ERROR_OK)
1529                                 return retval;
1530                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1531                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1532                                         brp_list[brp_i].value);
1533                         if (retval != ERROR_OK)
1534                                 return retval;
1535
1536                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1537                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1538                                         (uint32_t)brp_list[brp_i].value);
1539                         if (retval != ERROR_OK)
1540                                 return retval;
1541                         breakpoint->set = 0;
1542                         return ERROR_OK;
1543                 }
1544         } else {
1545                 /* restore original instruction (kept in target endianness) */
1546
1547                 armv8_cache_d_inner_flush_virt(armv8,
1548                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1549                                 breakpoint->length);
1550
1551                 if (breakpoint->length == 4) {
1552                         retval = target_write_memory(target,
1553                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1554                                         4, 1, breakpoint->orig_instr);
1555                         if (retval != ERROR_OK)
1556                                 return retval;
1557                 } else {
1558                         retval = target_write_memory(target,
1559                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1560                                         2, 1, breakpoint->orig_instr);
1561                         if (retval != ERROR_OK)
1562                                 return retval;
1563                 }
1564
1565                 armv8_cache_d_inner_flush_virt(armv8,
1566                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1567                                 breakpoint->length);
1568
1569                 armv8_cache_i_inner_inval_virt(armv8,
1570                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1571                                 breakpoint->length);
1572         }
1573         breakpoint->set = 0;
1574
1575         return ERROR_OK;
1576 }
1577
1578 static int aarch64_add_breakpoint(struct target *target,
1579         struct breakpoint *breakpoint)
1580 {
1581         struct aarch64_common *aarch64 = target_to_aarch64(target);
1582
1583         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1584                 LOG_INFO("no hardware breakpoint available");
1585                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1586         }
1587
1588         if (breakpoint->type == BKPT_HARD)
1589                 aarch64->brp_num_available--;
1590
1591         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1592 }
1593
1594 static int aarch64_add_context_breakpoint(struct target *target,
1595         struct breakpoint *breakpoint)
1596 {
1597         struct aarch64_common *aarch64 = target_to_aarch64(target);
1598
1599         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1600                 LOG_INFO("no hardware breakpoint available");
1601                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1602         }
1603
1604         if (breakpoint->type == BKPT_HARD)
1605                 aarch64->brp_num_available--;
1606
1607         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1608 }
1609
1610 static int aarch64_add_hybrid_breakpoint(struct target *target,
1611         struct breakpoint *breakpoint)
1612 {
1613         struct aarch64_common *aarch64 = target_to_aarch64(target);
1614
1615         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1616                 LOG_INFO("no hardware breakpoint available");
1617                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1618         }
1619
1620         if (breakpoint->type == BKPT_HARD)
1621                 aarch64->brp_num_available--;
1622
1623         return aarch64_set_hybrid_breakpoint(target, breakpoint);      /* context-ID + address match */
1624 }
1625
1626
1627 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1628 {
1629         struct aarch64_common *aarch64 = target_to_aarch64(target);
1630
1631 #if 0
1632 /* It is perfectly possible to remove breakpoints while the target is running */
1633         if (target->state != TARGET_HALTED) {
1634                 LOG_WARNING("target not halted");
1635                 return ERROR_TARGET_NOT_HALTED;
1636         }
1637 #endif
1638
1639         if (breakpoint->set) {
1640                 aarch64_unset_breakpoint(target, breakpoint);
1641                 if (breakpoint->type == BKPT_HARD)
1642                         aarch64->brp_num_available++;
1643         }
1644
1645         return ERROR_OK;
1646 }
1647
1648 /*
1649  * aarch64 reset functions
1650  */
1651
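/*
 * assert_reset either runs the board's reset-assert event handler or pulses
 * SRST; deassert_reset releases SRST, re-initializes debug access and, for
 * "reset halt", requests a halt if the core already started running.
 *
 * Illustrative config-script hook for boards without SRST (contents are an
 * example only):
 *   $_TARGETNAME configure -event reset-assert { <poke board reset logic> }
 */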
1652 static int aarch64_assert_reset(struct target *target)
1653 {
1654         struct armv8_common *armv8 = target_to_armv8(target);
1655
1656         LOG_DEBUG(" ");
1657
1658         /* FIXME when halt is requested, make it work somehow... */
1659
1660         /* Issue some kind of warm reset. */
1661         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1662                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1663         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1664                 /* REVISIT handle "pulls" cases, if there's
1665                  * hardware that needs them to work.
1666                  */
1667                 jtag_add_reset(0, 1);
1668         } else {
1669                 LOG_ERROR("%s: how to reset?", target_name(target));
1670                 return ERROR_FAIL;
1671         }
1672
1673         /* registers are now invalid */
1674         if (target_was_examined(target)) {
1675                 register_cache_invalidate(armv8->arm.core_cache);
1676                 register_cache_invalidate(armv8->arm.core_cache->next);
1677         }
1678
1679         target->state = TARGET_RESET;
1680
1681         return ERROR_OK;
1682 }
1683
1684 static int aarch64_deassert_reset(struct target *target)
1685 {
1686         int retval;
1687
1688         LOG_DEBUG(" ");
1689
1690         /* be certain SRST is off */
1691         jtag_add_reset(0, 0);
1692
1693         if (!target_was_examined(target))
1694                 return ERROR_OK;
1695
1696         retval = aarch64_poll(target);
1697         if (retval != ERROR_OK)
1698                 return retval;
1699
1700         retval = aarch64_init_debug_access(target);
1701         if (retval != ERROR_OK)
1702                 return retval;
1703
1704         if (target->reset_halt) {
1705                 if (target->state != TARGET_HALTED) {
1706                         LOG_WARNING("%s: ran after reset and before halt ...",
1707                                 target_name(target));
1708                         retval = target_halt(target);
1709                 }
1710         }
1711
1712         return retval;
1713 }
1714
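/*
 * Slow write path: each element is written to DBGDTRRX, moved into W1/R1 by
 * an MRS/MRC issued through the ITR, and then stored with a post-indexed
 * STRB/STRH/STR so the address register advances automatically. This works
 * for any access size but costs several debug transactions per element.
 */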
1715 static int aarch64_write_cpu_memory_slow(struct target *target,
1716         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1717 {
1718         struct armv8_common *armv8 = target_to_armv8(target);
1719         struct arm_dpm *dpm = &armv8->dpm;
1720         struct arm *arm = &armv8->arm;
1721         int retval;
1722
1723         armv8_reg_current(arm, 1)->dirty = true;
1724
1725         /* change DCC to normal mode if necessary */
1726         if (*dscr & DSCR_MA) {
1727                 *dscr &= ~DSCR_MA;
1728                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1729                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1730                 if (retval != ERROR_OK)
1731                         return retval;
1732         }
1733
1734         while (count) {
1735                 uint32_t data, opcode;
1736
1737                 /* write the data to store into DTRRX */
1738                 if (size == 1)
1739                         data = *buffer;
1740                 else if (size == 2)
1741                         data = target_buffer_get_u16(target, buffer);
1742                 else
1743                         data = target_buffer_get_u32(target, buffer);
1744                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1745                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1746                 if (retval != ERROR_OK)
1747                         return retval;
1748
1749                 if (arm->core_state == ARM_STATE_AARCH64)
1750                         retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1751                 else
1752                         retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1753                 if (retval != ERROR_OK)
1754                         return retval;
1755
1756                 if (size == 1)
1757                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1758                 else if (size == 2)
1759                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1760                 else
1761                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1762                 retval = dpm->instr_execute(dpm, opcode);
1763                 if (retval != ERROR_OK)
1764                         return retval;
1765
1766                 /* Advance */
1767                 buffer += size;
1768                 --count;
1769         }
1770
1771         return ERROR_OK;
1772 }
1773
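/*
 * Fast write path (word-sized, 4-byte aligned accesses only): with EDSCR.MA
 * set, every write to DBGDTRRX makes the PE store that word at [X0] and
 * advance X0 by 4, so the whole block can be streamed with a single
 * non-incrementing buffer write to the DTRRX address.
 */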
1774 static int aarch64_write_cpu_memory_fast(struct target *target,
1775         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1776 {
1777         struct armv8_common *armv8 = target_to_armv8(target);
1778         struct arm *arm = &armv8->arm;
1779         int retval;
1780
1781         armv8_reg_current(arm, 1)->dirty = true;
1782
1783         /* Step 1.d   - Change DCC to memory mode */
1784         *dscr |= DSCR_MA;
1785         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1786                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1787         if (retval != ERROR_OK)
1788                 return retval;
1789
1790
1791         /* Step 2.a   - Do the write */
1792         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1793                                         buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1794         if (retval != ERROR_OK)
1795                 return retval;
1796
1797         /* Step 3.a   - Switch DTR mode back to Normal mode */
1798         *dscr &= ~DSCR_MA;
1799         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1800                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1801         if (retval != ERROR_OK)
1802                 return retval;
1803
1804         return ERROR_OK;
1805 }
1806
1807 static int aarch64_write_cpu_memory(struct target *target,
1808         uint64_t address, uint32_t size,
1809         uint32_t count, const uint8_t *buffer)
1810 {
1811         /* write memory through APB-AP */
1812         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1813         struct armv8_common *armv8 = target_to_armv8(target);
1814         struct arm_dpm *dpm = &armv8->dpm;
1815         struct arm *arm = &armv8->arm;
1816         uint32_t dscr;
1817
1818         if (target->state != TARGET_HALTED) {
1819                 LOG_WARNING("target not halted");
1820                 return ERROR_TARGET_NOT_HALTED;
1821         }
1822
1823         /* Mark register X0 as dirty, as it will be used
1824          * for transferring the data.
1825          * It will be restored automatically when exiting
1826          * debug mode
1827          */
1828         armv8_reg_current(arm, 0)->dirty = true;
1829
1830         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1831
1832         /* Read DSCR */
1833         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1834                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1835         if (retval != ERROR_OK)
1836                 return retval;
1837
1838         /* Set Normal access mode  */
1839         dscr = (dscr & ~DSCR_MA);
1840         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1841                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1842         if (retval != ERROR_OK)
1843                 return retval;
1844
1845         if (arm->core_state == ARM_STATE_AARCH64) {
1846                 /* Write X0 with value 'address' using write procedure */
1847                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1848                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1849                 retval = dpm->instr_write_data_dcc_64(dpm,
1850                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1851         } else {
1852                 /* Write R0 with value 'address' using write procedure */
1853                 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1854                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1855                 retval = dpm->instr_write_data_dcc(dpm,
1856                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1857         }
1858
1859         if (retval != ERROR_OK)
1860                 return retval;
1861
1862         if (size == 4 && (address % 4) == 0)
1863                 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1864         else
1865                 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1866
1867         if (retval != ERROR_OK) {
1868                 /* Unset DTR mode */
1869                 mem_ap_read_atomic_u32(armv8->debug_ap,
1870                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1871                 dscr &= ~DSCR_MA;
1872                 mem_ap_write_atomic_u32(armv8->debug_ap,
1873                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1874         }
1875
1876         /* Check for sticky abort flags in the DSCR */
1877         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1878                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1879         if (retval != ERROR_OK)
1880                 return retval;
1881
1882         dpm->dscr = dscr;
1883         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1884                 /* Abort occurred - clear it and exit */
1885                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1886                 armv8_dpm_handle_exception(dpm, true);
1887                 return ERROR_FAIL;
1888         }
1889
1890         /* Done */
1891         return ERROR_OK;
1892 }
1893
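/*
 * Slow read path: the core executes a post-indexed LDRB/LDRH/LDR into
 * W1/R1, transfers the value to DBGDTRTX with an MSR/MCR, and the debugger
 * then reads DTRTX - one element per iteration, any access size.
 */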
1894 static int aarch64_read_cpu_memory_slow(struct target *target,
1895         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1896 {
1897         struct armv8_common *armv8 = target_to_armv8(target);
1898         struct arm_dpm *dpm = &armv8->dpm;
1899         struct arm *arm = &armv8->arm;
1900         int retval;
1901
1902         armv8_reg_current(arm, 1)->dirty = true;
1903
1904         /* change DCC to normal mode (if necessary) */
1905         if (*dscr & DSCR_MA) {
1906                 *dscr &= ~DSCR_MA;
1907                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1908                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1909                 if (retval != ERROR_OK)
1910                         return retval;
1911         }
1912
1913         while (count) {
1914                 uint32_t opcode, data;
1915
1916                 if (size == 1)
1917                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1918                 else if (size == 2)
1919                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1920                 else
1921                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1922                 retval = dpm->instr_execute(dpm, opcode);
1923                 if (retval != ERROR_OK)
1924                         return retval;
1925
1926                 if (arm->core_state == ARM_STATE_AARCH64)
1927                         retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1928                 else
1929                         retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1930                 if (retval != ERROR_OK)
1931                         return retval;
1932
1933                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1934                                 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1935                 if (retval != ERROR_OK)
1936                         return retval;
1937
1938                 if (size == 1)
1939                         *buffer = (uint8_t)data;
1940                 else if (size == 2)
1941                         target_buffer_set_u16(target, buffer, (uint16_t)data);
1942                 else
1943                         target_buffer_set_u32(target, buffer, data);
1944
1945                 /* Advance */
1946                 buffer += size;
1947                 --count;
1948         }
1949
1950         return ERROR_OK;
1951 }
1952
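/*
 * Fast read path (word-sized, 4-byte aligned accesses only): in memory
 * access mode each read of DBGDTRTX returns the previously loaded word and
 * triggers a new load from [X0] with X0 incremented by 4. The first value
 * is therefore discarded, count-1 words are streamed, and memory access
 * mode is left before fetching the final word so no load is issued past the
 * end of the requested range.
 */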
1953 static int aarch64_read_cpu_memory_fast(struct target *target,
1954         uint32_t count, uint8_t *buffer, uint32_t *dscr)
1955 {
1956         struct armv8_common *armv8 = target_to_armv8(target);
1957         struct arm_dpm *dpm = &armv8->dpm;
1958         struct arm *arm = &armv8->arm;
1959         int retval;
1960         uint32_t value;
1961
1962         /* Mark X1 as dirty */
1963         armv8_reg_current(arm, 1)->dirty = true;
1964
1965         if (arm->core_state == ARM_STATE_AARCH64) {
1966                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1967                 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1968         } else {
1969                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1970                 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1971         }
1972
1973         if (retval != ERROR_OK)
1974                 return retval;
1975
1976         /* Step 1.e - Change DCC to memory mode */
1977         *dscr |= DSCR_MA;
1978         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1979                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1980         if (retval != ERROR_OK)
1981                 return retval;
1982
1983         /* Step 1.f - read DBGDTRTX and discard the value */
1984         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1985                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1986         if (retval != ERROR_OK)
1987                 return retval;
1988
1989         count--;
1990         /* Read the data - each read of DBGDTRTX causes the load instruction to be reissued.
1991          * Abort flags are sticky, so they can be checked at the end of the transaction.
1992          *
1993          * The data is read in 32-bit aligned units.
1994          */
1995
1996         if (count) {
1997                 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1998                  * increments X0 by 4. */
1999                 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
2000                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
2001                 if (retval != ERROR_OK)
2002                         return retval;
2003         }
2004
2005         /* Step 3.a - set DTR access mode back to Normal mode   */
2006         *dscr &= ~DSCR_MA;
2007         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2008                                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2009         if (retval != ERROR_OK)
2010                 return retval;
2011
2012         /* Step 3.b - read DBGDTRTX for the final value */
2013         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2014                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2015         if (retval != ERROR_OK)
2016                 return retval;
2017
2018         target_buffer_set_u32(target, buffer + count * 4, value);
2019         return retval;
2020 }
2021
2022 static int aarch64_read_cpu_memory(struct target *target,
2023         target_addr_t address, uint32_t size,
2024         uint32_t count, uint8_t *buffer)
2025 {
2026         /* read memory through APB-AP */
2027         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2028         struct armv8_common *armv8 = target_to_armv8(target);
2029         struct arm_dpm *dpm = &armv8->dpm;
2030         struct arm *arm = &armv8->arm;
2031         uint32_t dscr;
2032
2033         LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2034                         address, size, count);
2035
2036         if (target->state != TARGET_HALTED) {
2037                 LOG_WARNING("target not halted");
2038                 return ERROR_TARGET_NOT_HALTED;
2039         }
2040
2041         /* Mark register X0 as dirty, as it will be used
2042          * for transferring the data.
2043          * It will be restored automatically when exiting
2044          * debug mode
2045          */
2046         armv8_reg_current(arm, 0)->dirty = true;
2047
2048         /* Read DSCR */
2049         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2050                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2051         if (retval != ERROR_OK)
2052                 return retval;
2053
2054         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2055
2056         /* Set Normal access mode  */
2057         dscr &= ~DSCR_MA;
2058         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2059                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2060         if (retval != ERROR_OK)
2061                 return retval;
2062
2063         if (arm->core_state == ARM_STATE_AARCH64) {
2064                 /* Write X0 with value 'address' using write procedure */
2065                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2066                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2067                 retval = dpm->instr_write_data_dcc_64(dpm,
2068                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2069         } else {
2070                 /* Write R0 with value 'address' using write procedure */
2071                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2072                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2073                 retval = dpm->instr_write_data_dcc(dpm,
2074                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2075         }
2076
2077         if (retval != ERROR_OK)
2078                 return retval;
2079
2080         if (size == 4 && (address % 4) == 0)
2081                 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2082         else
2083                 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2084
2085         if (dscr & DSCR_MA) {
2086                 dscr &= ~DSCR_MA;
2087                 mem_ap_write_atomic_u32(armv8->debug_ap,
2088                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2089         }
2090
2091         if (retval != ERROR_OK)
2092                 return retval;
2093
2094         /* Check for sticky abort flags in the DSCR */
2095         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2096                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2097         if (retval != ERROR_OK)
2098                 return retval;
2099
2100         dpm->dscr = dscr;
2101
2102         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2103                 /* Abort occurred - clear it and exit */
2104                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2105                 armv8_dpm_handle_exception(dpm, true);
2106                 return ERROR_FAIL;
2107         }
2108
2109         /* Done */
2110         return ERROR_OK;
2111 }
2112
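/*
 * Physical accesses temporarily disable the MMU (aarch64_mmu_modify) and go
 * through the same APB-AP debug path; virtual accesses re-enable the MMU in
 * case an earlier physical access left it off.
 */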
2113 static int aarch64_read_phys_memory(struct target *target,
2114         target_addr_t address, uint32_t size,
2115         uint32_t count, uint8_t *buffer)
2116 {
2117         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2118
2119         if (count && buffer) {
2120                 /* read memory through APB-AP */
2121                 retval = aarch64_mmu_modify(target, 0);
2122                 if (retval != ERROR_OK)
2123                         return retval;
2124                 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2125         }
2126         return retval;
2127 }
2128
2129 static int aarch64_read_memory(struct target *target, target_addr_t address,
2130         uint32_t size, uint32_t count, uint8_t *buffer)
2131 {
2132         int mmu_enabled = 0;
2133         int retval;
2134
2135         /* determine if MMU was enabled on target stop */
2136         retval = aarch64_mmu(target, &mmu_enabled);
2137         if (retval != ERROR_OK)
2138                 return retval;
2139
2140         if (mmu_enabled) {
2141                 /* enable MMU as we could have disabled it for phys access */
2142                 retval = aarch64_mmu_modify(target, 1);
2143                 if (retval != ERROR_OK)
2144                         return retval;
2145         }
2146         return aarch64_read_cpu_memory(target, address, size, count, buffer);
2147 }
2148
2149 static int aarch64_write_phys_memory(struct target *target,
2150         target_addr_t address, uint32_t size,
2151         uint32_t count, const uint8_t *buffer)
2152 {
2153         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2154
2155         if (count && buffer) {
2156                 /* write memory through APB-AP */
2157                 retval = aarch64_mmu_modify(target, 0);
2158                 if (retval != ERROR_OK)
2159                         return retval;
2160                 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2161         }
2162
2163         return retval;
2164 }
2165
2166 static int aarch64_write_memory(struct target *target, target_addr_t address,
2167         uint32_t size, uint32_t count, const uint8_t *buffer)
2168 {
2169         int mmu_enabled = 0;
2170         int retval;
2171
2172         /* determine if MMU was enabled on target stop */
2173         retval = aarch64_mmu(target, &mmu_enabled);
2174         if (retval != ERROR_OK)
2175                 return retval;
2176
2177         if (mmu_enabled) {
2178                 /* enable MMU as we could have disabled it for phys access */
2179                 retval = aarch64_mmu_modify(target, 1);
2180                 if (retval != ERROR_OK)
2181                         return retval;
2182         }
2183         return aarch64_write_cpu_memory(target, address, size, count, buffer);
2184 }
2185
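/*
 * Periodic callback while the target runs: poll DSCR for "DTRTX full" and
 * forward any debug-channel words the firmware pushed through the DCC to
 * the generic target_request() handler.
 */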
2186 static int aarch64_handle_target_request(void *priv)
2187 {
2188         struct target *target = priv;
2189         struct armv8_common *armv8 = target_to_armv8(target);
2190         int retval;
2191
2192         if (!target_was_examined(target))
2193                 return ERROR_OK;
2194         if (!target->dbg_msg_enabled)
2195                 return ERROR_OK;
2196
2197         if (target->state == TARGET_RUNNING) {
2198                 uint32_t request;
2199                 uint32_t dscr;
2200                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2201                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2202
2203                 /* check if we have data */
2204                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2205                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2206                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2207                         if (retval == ERROR_OK) {
2208                                 target_request(target, request);
2209                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2210                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2211                         }
2212                 }
2213         }
2214
2215         return ERROR_OK;
2216 }
2217
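/*
 * First examination: locate the APB-AP, find the core's debug base address
 * from the ROM table (unless -dbgbase was given), read the ID registers and
 * size the breakpoint register pair list from the ID_AA64DFR0_EL1 BRPs
 * (bits 15:12) and CTX_CMPs (bits 31:28) fields, each encoded as "number of
 * pairs minus one".
 */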
2218 static int aarch64_examine_first(struct target *target)
2219 {
2220         struct aarch64_common *aarch64 = target_to_aarch64(target);
2221         struct armv8_common *armv8 = &aarch64->armv8_common;
2222         struct adiv5_dap *swjdp = armv8->arm.dap;
2223         struct aarch64_private_config *pc;
2224         int i;
2225         int retval = ERROR_OK;
2226         uint64_t debug, ttypr;
2227         uint32_t cpuid;
2228         uint32_t tmp0, tmp1, tmp2, tmp3;
2229         debug = ttypr = cpuid = 0;
2230
2231         /* Search for the APB-AP - it is needed for access to debug registers */
2232         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2233         if (retval != ERROR_OK) {
2234                 LOG_ERROR("Could not find APB-AP for debug access");
2235                 return retval;
2236         }
2237
2238         retval = mem_ap_init(armv8->debug_ap);
2239         if (retval != ERROR_OK) {
2240                 LOG_ERROR("Could not initialize the APB-AP");
2241                 return retval;
2242         }
2243
2244         armv8->debug_ap->memaccess_tck = 10;
2245
2246         if (!target->dbgbase_set) {
2247                 uint32_t dbgbase;
2248                 /* Get ROM Table base */
2249                 uint32_t apid;
2250                 int32_t coreidx = target->coreid;
2251                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2252                 if (retval != ERROR_OK)
2253                         return retval;
2254                 /* Lookup 0x15 -- Processor DAP */
2255                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2256                                 &armv8->debug_base, &coreidx);
2257                 if (retval != ERROR_OK)
2258                         return retval;
2259                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2260                                 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2261         } else
2262                 armv8->debug_base = target->dbgbase;
2263
2264         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2265                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2266         if (retval != ERROR_OK) {
2267                 LOG_DEBUG("Examine %s failed", "oslock");
2268                 return retval;
2269         }
2270
2271         retval = mem_ap_read_u32(armv8->debug_ap,
2272                         armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2273         if (retval != ERROR_OK) {
2274                 LOG_DEBUG("Examine %s failed", "CPUID");
2275                 return retval;
2276         }
2277
2278         retval = mem_ap_read_u32(armv8->debug_ap,
2279                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2280         retval += mem_ap_read_u32(armv8->debug_ap,
2281                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2282         if (retval != ERROR_OK) {
2283                 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2284                 return retval;
2285         }
2286         retval = mem_ap_read_u32(armv8->debug_ap,
2287                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2288         retval += mem_ap_read_u32(armv8->debug_ap,
2289                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2290         if (retval != ERROR_OK) {
2291                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2292                 return retval;
2293         }
2294
2295         retval = dap_run(armv8->debug_ap->dap);
2296         if (retval != ERROR_OK) {
2297                 LOG_ERROR("%s: examination failed", target_name(target));
2298                 return retval;
2299         }
2300
2301         ttypr |= tmp1;
2302         ttypr = (ttypr << 32) | tmp0;
2303         debug |= tmp3;
2304         debug = (debug << 32) | tmp2;
2305
2306         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2307         LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2308         LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2309
2310         if (target->private_config == NULL)
2311                 return ERROR_FAIL;
2312
2313         pc = (struct aarch64_private_config *)target->private_config;
2314         if (pc->cti == NULL)
2315                 return ERROR_FAIL;
2316
2317         armv8->cti = pc->cti;
2318
2319         retval = aarch64_dpm_setup(aarch64, debug);
2320         if (retval != ERROR_OK)
2321                 return retval;
2322
2323         /* Setup Breakpoint Register Pairs */
2324         aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2325         aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2326         aarch64->brp_num_available = aarch64->brp_num;
2327         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2328         for (i = 0; i < aarch64->brp_num; i++) {
2329                 aarch64->brp_list[i].used = 0;
2330                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2331                         aarch64->brp_list[i].type = BRP_NORMAL;
2332                 else
2333                         aarch64->brp_list[i].type = BRP_CONTEXT;
2334                 aarch64->brp_list[i].value = 0;
2335                 aarch64->brp_list[i].control = 0;
2336                 aarch64->brp_list[i].BRPn = i;
2337         }
2338
2339         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2340
2341         target->state = TARGET_UNKNOWN;
2342         target->debug_reason = DBG_REASON_NOTHALTED;
2343         aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2344         target_set_examined(target);
2345         return ERROR_OK;
2346 }
2347
2348 static int aarch64_examine(struct target *target)
2349 {
2350         int retval = ERROR_OK;
2351
2352         /* don't re-probe hardware after each reset */
2353         if (!target_was_examined(target))
2354                 retval = aarch64_examine_first(target);
2355
2356         /* Configure core debug access */
2357         if (retval == ERROR_OK)
2358                 retval = aarch64_init_debug_access(target);
2359
2360         return retval;
2361 }
2362
2363 /*
2364  *      aarch64 target creation and initialization
2365  */
2366
2367 static int aarch64_init_target(struct command_context *cmd_ctx,
2368         struct target *target)
2369 {
2370         /* examine_first() does a bunch of this */
2371         arm_semihosting_init(target);
2372         return ERROR_OK;
2373 }
2374
2375 static int aarch64_init_arch_info(struct target *target,
2376         struct aarch64_common *aarch64, struct adiv5_dap *dap)
2377 {
2378         struct armv8_common *armv8 = &aarch64->armv8_common;
2379
2380         /* Setup struct aarch64_common */
2381         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2382         armv8->arm.dap = dap;
2383
2384         /* register arch-specific functions */
2385         armv8->examine_debug_reason = NULL;
2386         armv8->post_debug_entry = aarch64_post_debug_entry;
2387         armv8->pre_restore_context = NULL;
2388         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2389
2390         armv8_init_arch_info(target, armv8);
2391         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2392
2393         return ERROR_OK;
2394 }
2395
2396 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2397 {
2398         struct aarch64_private_config *pc = target->private_config;
2399         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2400
2401         if (aarch64 == NULL || adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2402                 return ERROR_FAIL;
2403
2404         return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2405 }
2406
2407 static void aarch64_deinit_target(struct target *target)
2408 {
2409         struct aarch64_common *aarch64 = target_to_aarch64(target);
2410         struct armv8_common *armv8 = &aarch64->armv8_common;
2411         struct arm_dpm *dpm = &armv8->dpm;
2412
2413         armv8_free_reg_cache(target);
2414         free(aarch64->brp_list);
2415         free(dpm->dbp);
2416         free(dpm->dwp);
2417         free(target->private_config);
2418         free(aarch64);
2419 }
2420
2421 static int aarch64_mmu(struct target *target, int *enabled)
2422 {
2423         if (target->state != TARGET_HALTED) {
2424                 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2425                 return ERROR_TARGET_INVALID;
2426         }
2427
2428         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2429         return ERROR_OK;
2430 }
2431
2432 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2433                              target_addr_t *phys)
2434 {
2435         return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2436 }
2437
2438 /*
2439  * private target configuration items
2440  */
2441 enum aarch64_cfg_param {
2442         CFG_CTI,
2443 };
2444
2445 static const Jim_Nvp nvp_config_opts[] = {
2446         { .name = "-cti", .value = CFG_CTI },
2447         { .name = NULL, .value = -1 }
2448 };
2449
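/*
 * Parse target-create/configure options. Besides the common ADIv5 options
 * this target needs a cross-trigger interface; an illustrative config
 * snippet (instance names, AP number and address are examples only):
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 1 -ctibase 0x80820000
 *   target create $_CHIPNAME.a53.0 aarch64 -dap $_CHIPNAME.dap -cti $_CHIPNAME.cti0
 */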
2450 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2451 {
2452         struct aarch64_private_config *pc;
2453         Jim_Nvp *n;
2454         int e;
2455
2456         pc = (struct aarch64_private_config *)target->private_config;
2457         if (pc == NULL) {
2458                 pc = calloc(1, sizeof(struct aarch64_private_config));
2459                 target->private_config = pc;
2460         }
2461
2462         /*
2463          * Call adiv5_jim_configure() to parse the common DAP options
2464          * It will return JIM_CONTINUE if it didn't find any known
2465          * options, JIM_OK if it correctly parsed the topmost option
2466          * and JIM_ERR if an error occurred during parameter evaluation.
2467          * For JIM_CONTINUE, we check our own params.
2468          */
2469         e = adiv5_jim_configure(target, goi);
2470         if (e != JIM_CONTINUE)
2471                 return e;
2472
2473         /* parse config or cget options ... */
2474         if (goi->argc > 0) {
2475                 Jim_SetEmptyResult(goi->interp);
2476
2477                 /* check first if topmost item is for us */
2478                 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2479                                 goi->argv[0], &n);
2480                 if (e != JIM_OK)
2481                         return JIM_CONTINUE;
2482
2483                 e = Jim_GetOpt_Obj(goi, NULL);
2484                 if (e != JIM_OK)
2485                         return e;
2486
2487                 switch (n->value) {
2488                 case CFG_CTI: {
2489                         if (goi->isconfigure) {
2490                                 Jim_Obj *o_cti;
2491                                 struct arm_cti *cti;
2492                                 e = Jim_GetOpt_Obj(goi, &o_cti);
2493                                 if (e != JIM_OK)
2494                                         return e;
2495                                 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2496                                 if (cti == NULL) {
2497                                         Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2498                                         return JIM_ERR;
2499                                 }
2500                                 pc->cti = cti;
2501                         } else {
2502                                 if (goi->argc != 0) {
2503                                         Jim_WrongNumArgs(goi->interp,
2504                                                         goi->argc, goi->argv,
2505                                                         "NO PARAMS");
2506                                         return JIM_ERR;
2507                                 }
2508
2509                                 if (pc == NULL || pc->cti == NULL) {
2510                                         Jim_SetResultString(goi->interp, "CTI not configured", -1);
2511                                         return JIM_ERR;
2512                                 }
2513                                 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2514                         }
2515                         break;
2516                 }
2517
2518                 default:
2519                         return JIM_CONTINUE;
2520                 }
2521         }
2522
2523         return JIM_OK;
2524 }
2525
2526 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2527 {
2528         struct target *target = get_current_target(CMD_CTX);
2529         struct armv8_common *armv8 = target_to_armv8(target);
2530
2531         return armv8_handle_cache_info_command(CMD_CTX,
2532                         &armv8->armv8_mmu.armv8_cache);
2533 }
2534
2535
2536 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2537 {
2538         struct target *target = get_current_target(CMD_CTX);
2539         if (!target_was_examined(target)) {
2540                 LOG_ERROR("target not examined yet");
2541                 return ERROR_FAIL;
2542         }
2543
2544         return aarch64_init_debug_access(target);
2545 }
2546 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2547 {
2548         struct target *target = get_current_target(CMD_CTX);
2549         /* check target is an smp target */
2550         struct target_list *head;
2551         struct target *curr;
2552         head = target->head;
2553         target->smp = 0;
2554         if (head != (struct target_list *)NULL) {
2555                 while (head != (struct target_list *)NULL) {
2556                         curr = head->target;
2557                         curr->smp = 0;
2558                         head = head->next;
2559                 }
2560                 /*  fixes the target display to the debugger */
2561                 target->gdb_service->target = target;
2562         }
2563         return ERROR_OK;
2564 }
2565
2566 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2567 {
2568         struct target *target = get_current_target(CMD_CTX);
2569         struct target_list *head;
2570         struct target *curr;
2571         head = target->head;
2572         if (head != (struct target_list *)NULL) {
2573                 target->smp = 1;
2574                 while (head != (struct target_list *)NULL) {
2575                         curr = head->target;
2576                         curr->smp = 1;
2577                         head = head->next;
2578                 }
2579         }
2580         return ERROR_OK;
2581 }
2582
2583 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2584 {
2585         struct target *target = get_current_target(CMD_CTX);
2586         struct aarch64_common *aarch64 = target_to_aarch64(target);
2587
2588         static const Jim_Nvp nvp_maskisr_modes[] = {
2589                 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2590                 { .name = "on", .value = AARCH64_ISRMASK_ON },
2591                 { .name = NULL, .value = -1 },
2592         };
2593         const Jim_Nvp *n;
2594
2595         if (CMD_ARGC > 0) {
2596                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2597                 if (n->name == NULL) {
2598                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2599                         return ERROR_COMMAND_SYNTAX_ERROR;
2600                 }
2601
2602                 aarch64->isrmasking_mode = n->value;
2603         }
2604
2605         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2606         command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2607
2608         return ERROR_OK;
2609 }
2610
2611 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2612 {
2613         struct command_context *context;
2614         struct target *target;
2615         struct arm *arm;
2616         int retval;
2617         bool is_mcr = false;
2618         int arg_cnt = 0;
2619
2620         if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2621                 is_mcr = true;
2622                 arg_cnt = 7;
2623         } else {
2624                 arg_cnt = 6;
2625         }
2626
2627         context = current_command_context(interp);
2628         assert(context != NULL);
2629
2630         target = get_current_target(context);
2631         if (target == NULL) {
2632                 LOG_ERROR("%s: no current target", __func__);
2633                 return JIM_ERR;
2634         }
2635         if (!target_was_examined(target)) {
2636                 LOG_ERROR("%s: not yet examined", target_name(target));
2637                 return JIM_ERR;
2638         }
2639
2640         arm = target_to_arm(target);
2641         if (!is_arm(arm)) {
2642                 LOG_ERROR("%s: not an ARM", target_name(target));
2643                 return JIM_ERR;
2644         }
2645
2646         if (target->state != TARGET_HALTED)
2647                 return JIM_ERR;
2648
2649         if (arm->core_state == ARM_STATE_AARCH64) {
2650                 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2651                 return JIM_ERR;
2652         }
2653
2654         if (argc != arg_cnt) {
2655                 LOG_ERROR("%s: wrong number of arguments", __func__);
2656                 return JIM_ERR;
2657         }
2658
2659         int cpnum;
2660         uint32_t op1;
2661         uint32_t op2;
2662         uint32_t CRn;
2663         uint32_t CRm;
2664         uint32_t value;
2665         long l;
2666
2667         /* NOTE:  parameter sequence matches ARM instruction set usage:
2668          *      MCR     pNUM, op1, rX, CRn, CRm, op2    ; write CP from rX
2669          *      MRC     pNUM, op1, rX, CRn, CRm, op2    ; read CP into rX
2670          * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2671          */
	retval = Jim_GetLong(interp, argv[1], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"coprocessor", (int) l);
		return JIM_ERR;
	}
	cpnum = l;

	retval = Jim_GetLong(interp, argv[2], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"op1", (int) l);
		return JIM_ERR;
	}
	op1 = l;

	retval = Jim_GetLong(interp, argv[3], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"CRn", (int) l);
		return JIM_ERR;
	}
	CRn = l;

	retval = Jim_GetLong(interp, argv[4], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"CRm", (int) l);
		return JIM_ERR;
	}
	CRm = l;

	retval = Jim_GetLong(interp, argv[5], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"op2", (int) l);
		return JIM_ERR;
	}
	op2 = l;

	value = 0;

	if (is_mcr) {
		retval = Jim_GetLong(interp, argv[6], &l);
		if (retval != JIM_OK)
			return retval;
		value = l;

		/* NOTE: parameters reordered! */
		/* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
		retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
		if (retval != ERROR_OK)
			return JIM_ERR;
	} else {
		/* NOTE: parameters reordered! */
		/* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
		retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
		if (retval != ERROR_OK)
			return JIM_ERR;

		Jim_SetResult(interp, Jim_NewIntObj(interp, value));
	}

	return JIM_OK;
}
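
/*
 * Example use (illustrative): with the core halted in an AArch32 state,
 * MIDR can be read through CP15 with
 *	aarch64 mrc 15 0 0 0 0
 * and the corresponding "aarch64 mcr ..." form writes a coprocessor
 * register, taking the value to write as the final argument.
 */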

static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

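/*
 * Configuration sketch (Tcl; every name and address below is a placeholder):
 * an aarch64 target is created against a DAP and is normally paired with a
 * CTI for halt/restart control, e.g.
 *	cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 0 -baseaddr $_CTIBASE0
 *	target create $_CHIPNAME.cpu0 aarch64 -dap $_CHIPNAME.dap -cti $_CHIPNAME.cti0
 */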
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};