1 /* SPDX-License-Identifier: GPL-2.0-or-later */
11 #include "target/target.h"
12 #include "target/algorithm.h"
13 #include "target/target_type.h"
15 #include "jtag/jtag.h"
16 #include "target/register.h"
17 #include "target/breakpoints.h"
18 #include "helper/time_support.h"
21 #include "rtos/rtos.h"
/* Extract the (contiguous) bit-field selected by `mask` from `reg`.
 * `(mask) & ~((mask) << 1)` isolates the lowest set bit of the mask, so
 * dividing by it shifts the field down to bit 0. */
#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
/* Return `reg` with the bit-field selected by `mask` replaced by `val`
 * (val is shifted up into the field by multiplying by the mask's lowest bit). */
#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
/* Number of elements in a true array (not a pointer!).
 * Fixed macro hygiene: parenthesize the argument and index element 0
 * instead of dereferencing the bare `*x`. */
#define DIM(x) (sizeof(x)/sizeof((x)[0]))
/* Constants for legacy SiFive hardware breakpoints.
 * Bit meanings are grounded in how maybe_add_trigger_t1() programs the
 * register below: X/W/R are set from trigger->execute/write/read, the mode
 * bits from the corresponding misa extension bits. */
#define CSR_BPCONTROL_X (1<<0)            /* match on instruction execution */
#define CSR_BPCONTROL_W (1<<1)            /* match on data write */
#define CSR_BPCONTROL_R (1<<2)            /* match on data read */
#define CSR_BPCONTROL_U (1<<3)            /* enabled in U-mode */
#define CSR_BPCONTROL_S (1<<4)            /* enabled in S-mode */
#define CSR_BPCONTROL_H (1<<5)            /* enabled in H-mode */
#define CSR_BPCONTROL_M (1<<6)            /* enabled in M-mode */
#define CSR_BPCONTROL_BPMATCH (0xf<<7)    /* match type; 0 = exact match */
#define CSR_BPCONTROL_BPACTION (0xff<<11) /* action; 0 = raise breakpoint exception */

/* Fixed debug ROM/RAM addresses -- presumably used by the legacy (0.11)
 * debug interface driver; confirm against the riscv011 code. */
#define DEBUG_ROM_START 0x800
#define DEBUG_ROM_RESUME (DEBUG_ROM_START + 4)
#define DEBUG_ROM_EXCEPTION (DEBUG_ROM_START + 8)
#define DEBUG_RAM_START 0x400

#define SETHALTNOT 0x10c
/*** JTAG registers. ***/

#define DTMCONTROL 0x10                 /* IR value selecting dtmcontrol/dtmcs */
#define DTMCONTROL_DBUS_RESET (1<<16)
#define DTMCONTROL_IDLE (7<<10)
#define DTMCONTROL_ADDRBITS (0xf<<4)
#define DTMCONTROL_VERSION (0xf)        /* read by riscv_examine() to pick 011 vs 013 driver */

#define DBUS_OP_START 0
#define DBUS_OP_SIZE 2
/* NOTE(review): the enum header for these DBUS status values is not visible
 * in this chunk of the file; values reproduced as-is. */
DBUS_STATUS_SUCCESS = 0,
DBUS_STATUS_FAILED = 2,

/* Bit offsets/widths of the fields inside a dbus scan. */
#define DBUS_DATA_START 2
#define DBUS_DATA_SIZE 34
#define DBUS_ADDRESS_START 36

/*** Debug Bus registers. ***/

#define DMCONTROL 0x10
#define DMCONTROL_INTERRUPT (((uint64_t)1)<<33)
#define DMCONTROL_HALTNOT (((uint64_t)1)<<32)
#define DMCONTROL_BUSERROR (7<<19)
#define DMCONTROL_SERIAL (3<<16)
#define DMCONTROL_AUTOINCREMENT (1<<15)
#define DMCONTROL_ACCESS (7<<12)
#define DMCONTROL_HARTID (0x3ff<<2)
#define DMCONTROL_NDRESET (1<<1)
#define DMCONTROL_FULLRESET 1

/* dminfo field masks. */
#define DMINFO_ABUSSIZE (0x7fU<<25)
#define DMINFO_SERIALCOUNT (0xf<<21)
#define DMINFO_ACCESS128 (1<<20)
#define DMINFO_ACCESS64 (1<<19)
#define DMINFO_ACCESS32 (1<<18)
#define DMINFO_ACCESS16 (1<<17)
#define DMINFO_ACCESS8 (1<<16)
#define DMINFO_DRAMSIZE (0x3f<<10)
#define DMINFO_AUTHENTICATED (1<<5)
#define DMINFO_AUTHBUSY (1<<4)
#define DMINFO_AUTHTYPE (3<<2)
#define DMINFO_VERSION 3
/*** Info about the core being debugged. ***/

#define DBUS_ADDRESS_UNKNOWN 0xffff
#define DRAM_CACHE_SIZE 16

/* Pre-built scan_field globals for selecting the various JTAG registers.
 * NOTE(review): several initializer lines and closing `};` braces of these
 * aggregates are not visible in this chunk; the code below is reproduced
 * as-is.  `.num_bits` of the select_* fields is filled in later by
 * riscv_init_target() from the TAP's ir_length. */
uint8_t ir_dtmcontrol[4] = {DTMCONTROL};
struct scan_field select_dtmcontrol = {
	.out_value = ir_dtmcontrol
uint8_t ir_dbus[4] = {DBUS};
struct scan_field select_dbus = {
uint8_t ir_idcode[4] = {0x1};
struct scan_field select_idcode = {
	.out_value = ir_idcode

bscan_tunnel_type_t bscan_tunnel_type;
int bscan_tunnel_ir_width; /* if zero, then tunneling is not present/active */

/* Constant 0/1 bit patterns shifted through the tunnel. */
static uint8_t bscan_zero[4] = {0};
static uint8_t bscan_one[4] = {1};

uint8_t ir_user4[4] = {0x23};
struct scan_field select_user4 = {
	.out_value = ir_user4

uint8_t bscan_tunneled_ir_width[4] = {5}; /* overridden by assignment in riscv_init_target */
/* DR pattern that steers a DATA_REGISTER-type BSCAN tunnel to the DMI. */
struct scan_field _bscan_tunnel_data_register_select_dmi[] = {
		.out_value = bscan_zero,
		.num_bits = 5, /* initialized in riscv_init_target to ir width of DM */
		.out_value = ir_dbus,
		.out_value = bscan_tunneled_ir_width,
		.out_value = bscan_zero,
/* Same, for a NESTED_TAP-type tunnel (fields in reverse order). */
struct scan_field _bscan_tunnel_nested_tap_select_dmi[] = {
		.out_value = bscan_zero,
		.out_value = bscan_tunneled_ir_width,
		.num_bits = 0, /* initialized in riscv_init_target to ir width of DM */
		.out_value = ir_dbus,
		.out_value = bscan_zero,
struct scan_field *bscan_tunnel_nested_tap_select_dmi = _bscan_tunnel_nested_tap_select_dmi;
uint32_t bscan_tunnel_nested_tap_select_dmi_num_fields = DIM(_bscan_tunnel_nested_tap_select_dmi);

struct scan_field *bscan_tunnel_data_register_select_dmi = _bscan_tunnel_data_register_select_dmi;
uint32_t bscan_tunnel_data_register_select_dmi_num_fields = DIM(_bscan_tunnel_data_register_select_dmi);

/* NOTE(review): these three flags appear to be members of a struct (struct
 * trigger, judging by trigger_from_breakpoint() below) whose declaration is
 * not visible in this chunk -- confirm. */
bool read, write, execute;

/* Wall-clock timeout for a command/access. Settable via RISC-V Target commands. */
int riscv_command_timeout_sec = DEFAULT_COMMAND_TIMEOUT_SEC;

/* Wall-clock timeout after reset. Settable via RISC-V Target commands. */
int riscv_reset_timeout_sec = DEFAULT_RESET_TIMEOUT_SEC;

/* Global behavior switches, set by configuration commands. */
bool riscv_prefer_sba;
bool riscv_enable_virt2phys = true;
bool riscv_ebreakm = true;
bool riscv_ebreaks = true;
bool riscv_ebreaku = true;

bool riscv_enable_virtual;

/* In addition to the ones in the standard spec, we'll also expose additional
 * The list is either NULL, or a series of ranges (inclusive), terminated with
/* Same, but for custom registers. */
range_t *expose_custom;
/* Page-table-walk descriptions for the three standard RISC-V virtual memory
 * schemes.  Per level: VPN extraction (shift/mask from the VA), PTE PPN
 * extraction, and PA PPN placement.
 * NOTE(review): the .name/.va_bits/.level/.pte_shift initializers are not
 * visible in this chunk. */
virt2phys_info_t sv32 = {
	.vpn_shift = {12, 22},
	.vpn_mask = {0x3ff, 0x3ff},
	.pte_ppn_shift = {10, 20},
	.pte_ppn_mask = {0x3ff, 0xfff},
	.pa_ppn_shift = {12, 22},
	.pa_ppn_mask = {0x3ff, 0xfff},

virt2phys_info_t sv39 = {
	.vpn_shift = {12, 21, 30},
	.vpn_mask = {0x1ff, 0x1ff, 0x1ff},
	.pte_ppn_shift = {10, 19, 28},
	.pte_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
	.pa_ppn_shift = {12, 21, 30},
	.pa_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},

virt2phys_info_t sv48 = {
	.vpn_shift = {12, 21, 30, 39},
	.vpn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ff},
	.pte_ppn_shift = {10, 19, 28, 37},
	.pte_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
	.pa_ppn_shift = {12, 21, 30, 39},
	.pa_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},

static int riscv_resume_go_all_harts(struct target *target);
/* Queue the JTAG scans that steer the BSCAN tunnel toward the DMI: select
 * USER4 in the BSCAN TAP's IR, then shift the tunnel-type-specific
 * "select DMI" pattern through its DR.  Scans are only queued here; the
 * caller is responsible for jtag_execute_queue(). */
void select_dmi_via_bscan(struct target *target)
	jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
	if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
		jtag_add_dr_scan(target->tap, bscan_tunnel_data_register_select_dmi_num_fields,
				bscan_tunnel_data_register_select_dmi, TAP_IDLE);
	else /* BSCAN_TUNNEL_NESTED_TAP */
		jtag_add_dr_scan(target->tap, bscan_tunnel_nested_tap_select_dmi_num_fields,
				bscan_tunnel_nested_tap_select_dmi, TAP_IDLE);
/* Read/write dtmcontrol (DTMCS) through a BSCAN tunnel: tunnel an IR scan
 * selecting dtmcontrol, then tunnel a 32-bit DR scan carrying `out`, then
 * re-select the DMI for subsequent traffic.  Returns the 32-bit value
 * scanned out of the target. */
uint32_t dtmcontrol_scan_via_bscan(struct target *target, uint32_t out)
	/* On BSCAN TAP: Select IR=USER4, issue tunneled IR scan via BSCAN TAP's DR */
	uint8_t tunneled_ir_width[4] = {bscan_tunnel_ir_width};
	uint8_t tunneled_dr_width[4] = {32};
	uint8_t out_value[5] = {0};
	uint8_t in_value[5] = {0};

	buf_set_u32(out_value, 0, 32, out);
	struct scan_field tunneled_ir[4] = {};
	struct scan_field tunneled_dr[4] = {};

	if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
		/* Tunnel framing: 3-bit header, payload, 7-bit width, 1-bit trailer. */
		tunneled_ir[0].num_bits = 3;
		tunneled_ir[0].out_value = bscan_zero;
		tunneled_ir[0].in_value = NULL;
		tunneled_ir[1].num_bits = bscan_tunnel_ir_width;
		tunneled_ir[1].out_value = ir_dtmcontrol;
		tunneled_ir[1].in_value = NULL;
		tunneled_ir[2].num_bits = 7;
		tunneled_ir[2].out_value = tunneled_ir_width;
		tunneled_ir[2].in_value = NULL;
		tunneled_ir[3].num_bits = 1;
		tunneled_ir[3].out_value = bscan_zero;
		tunneled_ir[3].in_value = NULL;

		tunneled_dr[0].num_bits = 3;
		tunneled_dr[0].out_value = bscan_zero;
		tunneled_dr[0].in_value = NULL;
		/* 32 payload bits + 1 bit to compensate for the tunnel's TCK skew. */
		tunneled_dr[1].num_bits = 32 + 1;
		tunneled_dr[1].out_value = out_value;
		tunneled_dr[1].in_value = in_value;
		tunneled_dr[2].num_bits = 7;
		tunneled_dr[2].out_value = tunneled_dr_width;
		tunneled_dr[2].in_value = NULL;
		tunneled_dr[3].num_bits = 1;
		tunneled_dr[3].out_value = bscan_one;
		tunneled_dr[3].in_value = NULL;

		/* BSCAN_TUNNEL_NESTED_TAP -- same framing, fields in reverse order. */
		tunneled_ir[3].num_bits = 3;
		tunneled_ir[3].out_value = bscan_zero;
		tunneled_ir[3].in_value = NULL;
		tunneled_ir[2].num_bits = bscan_tunnel_ir_width;
		tunneled_ir[2].out_value = ir_dtmcontrol;
		/* NOTE(review): the next assignment targets [1] and the one three
		 * lines down targets [2]; the indices look transposed relative to
		 * the branch above.  Harmless (both end up NULL) but worth tidying. */
		tunneled_ir[1].in_value = NULL;
		tunneled_ir[1].num_bits = 7;
		tunneled_ir[1].out_value = tunneled_ir_width;
		tunneled_ir[2].in_value = NULL;
		tunneled_ir[0].num_bits = 1;
		tunneled_ir[0].out_value = bscan_zero;
		tunneled_ir[0].in_value = NULL;

		tunneled_dr[3].num_bits = 3;
		tunneled_dr[3].out_value = bscan_zero;
		tunneled_dr[3].in_value = NULL;
		tunneled_dr[2].num_bits = 32 + 1;
		tunneled_dr[2].out_value = out_value;
		tunneled_dr[2].in_value = in_value;
		tunneled_dr[1].num_bits = 7;
		tunneled_dr[1].out_value = tunneled_dr_width;
		tunneled_dr[1].in_value = NULL;
		tunneled_dr[0].num_bits = 1;
		tunneled_dr[0].out_value = bscan_one;
		tunneled_dr[0].in_value = NULL;

	jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
	jtag_add_dr_scan(target->tap, DIM(tunneled_ir), tunneled_ir, TAP_IDLE);
	jtag_add_dr_scan(target->tap, DIM(tunneled_dr), tunneled_dr, TAP_IDLE);
	select_dmi_via_bscan(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);

	/* Note the starting offset is bit 1, not bit 0. In BSCAN tunnel, there is a one-bit TCK skew between
	uint32_t in = buf_get_u32(in_value, 1, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
/* Scan `out` through the dtmcontrol JTAG register and return the value
 * scanned back.  Delegates to the BSCAN path when tunneling is active.
 * Leaves the dbus register selected on exit so later scans hit the DMI. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
	struct scan_field field;
	uint8_t out_value[4] = { 0 };

	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dbus. */
	jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCONTROL: 0x%x -> 0x%x", out, in);
/* Return the DTM-version-specific target_type vtable (riscv011 or riscv013)
 * based on the version discovered in riscv_examine().  Logs an error when the
 * target is uninitialized or the version is unsupported.
 * NOTE(review): the switch's `case` labels are not visible in this chunk. */
static struct target_type *get_target_type(struct target *target)
	riscv_info_t *info = (riscv_info_t *) target->arch_info;
		LOG_ERROR("Target has not been initialized");

	switch (info->dtm_version) {
		return &riscv011_target;
		return &riscv013_target;
		LOG_ERROR("Unsupported DTM version: %d", info->dtm_version);
/* Allocate and initialize the per-target riscv_info_t, size the IR-select
 * scan fields from the TAP's ir_length, patch the BSCAN tunnel templates if
 * tunneling is configured, and set up semihosting. */
static int riscv_init_target(struct command_context *cmd_ctx,
		struct target *target)
	LOG_DEBUG("riscv_init_target()");
	target->arch_info = calloc(1, sizeof(riscv_info_t));
	if (!target->arch_info)
	riscv_info_t *info = (riscv_info_t *) target->arch_info;
	riscv_info_init(target, info);
	info->cmd_ctx = cmd_ctx;

	/* IR scan widths depend on this TAP's instruction register length. */
	select_dtmcontrol.num_bits = target->tap->ir_length;
	select_dbus.num_bits = target->tap->ir_length;
	select_idcode.num_bits = target->tap->ir_length;

	if (bscan_tunnel_ir_width != 0) {
		select_user4.num_bits = target->tap->ir_length;
		bscan_tunneled_ir_width[0] = bscan_tunnel_ir_width;
		/* Patch the pre-built DMI-select templates with the real tunneled IR width. */
		if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
			bscan_tunnel_data_register_select_dmi[1].num_bits = bscan_tunnel_ir_width;
		else /* BSCAN_TUNNEL_NESTED_TAP */
			bscan_tunnel_nested_tap_select_dmi[2].num_bits = bscan_tunnel_ir_width;

	riscv_semihosting_init(target);

	target->debug_reason = DBG_REASON_DBGRQ;
/* Free the register cache.  reg_list[0].arch_info is one shared allocation
 * covering the first GDB_REGNO_COUNT registers; entries from GDB_REGNO_COUNT
 * up were allocated individually and are freed one by one. */
static void riscv_free_registers(struct target *target)
	/* Free the shared structure used for most registers. */
	if (target->reg_cache) {
		if (target->reg_cache->reg_list) {
			free(target->reg_cache->reg_list[0].arch_info);
			/* Free the ones we allocated separately. */
			for (unsigned i = GDB_REGNO_COUNT; i < target->reg_cache->num_regs; i++)
				free(target->reg_cache->reg_list[i].arch_info);
			free(target->reg_cache->reg_list);
		free(target->reg_cache);
/* Tear down a target: let the version-specific driver deinit first, then
 * release the riscv_info_t allocations and the register cache. */
static void riscv_deinit_target(struct target *target)
	LOG_DEBUG("riscv_deinit_target()");
	struct target_type *tt = get_target_type(target);
	tt->deinit_target(target);
	riscv_info_t *info = (riscv_info_t *) target->arch_info;
	free(info->reg_names);

	riscv_free_registers(target);

	target->arch_info = NULL;
/* Translate an OpenOCD breakpoint into the generic trigger description used
 * by add_trigger()/remove_trigger(): execute-only, exact address. */
static void trigger_from_breakpoint(struct trigger *trigger,
		const struct breakpoint *breakpoint)
	trigger->address = breakpoint->address;
	trigger->length = breakpoint->length;
	trigger->mask = ~0LL;
	trigger->read = false;
	trigger->write = false;
	trigger->execute = true;
	/* unique_id is unique across both breakpoints and watchpoints. */
	trigger->unique_id = breakpoint->unique_id;
/* Try to program the currently selected trigger as a legacy SiFive type-1
 * (bpcontrol-style) trigger on one hart.  Writes tdata1/tdata2, reads tdata1
 * back, and if the hardware didn't accept our configuration, clears the
 * trigger and reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
static int maybe_add_trigger_t1(struct target *target, unsigned hartid,
		struct trigger *trigger, uint64_t tdata1)
	const uint32_t bpcontrol_x = 1<<0;
	const uint32_t bpcontrol_w = 1<<1;
	const uint32_t bpcontrol_r = 1<<2;
	const uint32_t bpcontrol_u = 1<<3;
	const uint32_t bpcontrol_s = 1<<4;
	const uint32_t bpcontrol_h = 1<<5;
	const uint32_t bpcontrol_m = 1<<6;
	const uint32_t bpcontrol_bpmatch = 0xf << 7;
	const uint32_t bpcontrol_bpaction = 0xff << 11;

	if (tdata1 & (bpcontrol_r | bpcontrol_w | bpcontrol_x)) {
		/* Trigger is already in use, presumably by user code. */
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Fire on whatever accesses the caller asked for... */
	tdata1 = set_field(tdata1, bpcontrol_r, trigger->read);
	tdata1 = set_field(tdata1, bpcontrol_w, trigger->write);
	tdata1 = set_field(tdata1, bpcontrol_x, trigger->execute);
	/* ...in every privilege mode this hart implements (from misa). */
	tdata1 = set_field(tdata1, bpcontrol_u,
			!!(r->misa[hartid] & (1 << ('U' - 'A'))));
	tdata1 = set_field(tdata1, bpcontrol_s,
			!!(r->misa[hartid] & (1 << ('S' - 'A'))));
	tdata1 = set_field(tdata1, bpcontrol_h,
			!!(r->misa[hartid] & (1 << ('H' - 'A'))));
	tdata1 |= bpcontrol_m;
	tdata1 = set_field(tdata1, bpcontrol_bpmatch, 0); /* exact match */
	tdata1 = set_field(tdata1, bpcontrol_bpaction, 0); /* cause bp exception */

	riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);

	/* Read back to detect WARL fields the hardware refused to set. */
	riscv_reg_t tdata1_rb;
	if (riscv_get_register_on_hart(target, &tdata1_rb, hartid,
				GDB_REGNO_TDATA1) != ERROR_OK)
	LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);

	if (tdata1 != tdata1_rb) {
		LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
				PRIx64 " to tdata1 it contains 0x%" PRIx64,
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
/* Try to program the currently selected trigger as a standard type-2
 * (mcontrol) address-match trigger on one hart.  Same write/read-back/verify
 * pattern as maybe_add_trigger_t1(). */
static int maybe_add_trigger_t2(struct target *target, unsigned hartid,
		struct trigger *trigger, uint64_t tdata1)
	/* tselect is already set */
	if (tdata1 & (MCONTROL_EXECUTE | MCONTROL_STORE | MCONTROL_LOAD)) {
		/* Trigger is already in use, presumably by user code. */
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* address/data match trigger */
	tdata1 |= MCONTROL_DMODE(riscv_xlen(target));
	tdata1 = set_field(tdata1, MCONTROL_ACTION,
			MCONTROL_ACTION_DEBUG_MODE);
	tdata1 = set_field(tdata1, MCONTROL_MATCH, MCONTROL_MATCH_EQUAL);
	tdata1 |= MCONTROL_M;
	/* Also match in each lesser privilege mode the hart implements. */
	if (r->misa[hartid] & (1 << ('H' - 'A')))
		tdata1 |= MCONTROL_H;
	if (r->misa[hartid] & (1 << ('S' - 'A')))
		tdata1 |= MCONTROL_S;
	if (r->misa[hartid] & (1 << ('U' - 'A')))
		tdata1 |= MCONTROL_U;

	if (trigger->execute)
		tdata1 |= MCONTROL_EXECUTE;
		tdata1 |= MCONTROL_LOAD;
		tdata1 |= MCONTROL_STORE;

	riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);

	/* Read back to detect WARL fields the hardware refused to set. */
	int result = riscv_get_register_on_hart(target, &tdata1_rb, hartid, GDB_REGNO_TDATA1);
	if (result != ERROR_OK)
	LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);

	if (tdata1 != tdata1_rb) {
		LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
				PRIx64 " to tdata1 it contains 0x%" PRIx64,
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
/* Find a free hardware trigger slot and program `trigger` into that same
 * slot on every enabled hart (so in RTOS mode each "thread" sees the same
 * breakpoints).  tselect is saved per hart first and restored afterwards.
 * Fails with ERROR_TARGET_RESOURCE_NOT_AVAILABLE when every slot is taken. */
static int add_trigger(struct target *target, struct trigger *trigger)
	if (riscv_enumerate_triggers(target) != ERROR_OK)

	/* In RTOS mode, we need to set the same trigger in the same slot on every
	 * hart, to keep up the illusion that each hart is a thread running on the
	/* Otherwise, we just set the trigger on the one hart this target deals
	riscv_reg_t tselect[RISCV_MAX_HARTS];

	/* Save each enabled hart's tselect so it can be restored at the end. */
	for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
		if (!riscv_hart_enabled(target, hartid))
		int result = riscv_get_register_on_hart(target, &tselect[hartid],
				hartid, GDB_REGNO_TSELECT);
		if (result != ERROR_OK)
	assert(first_hart >= 0);

	/* Scan slots for one we aren't already using. */
	for (i = 0; i < r->trigger_count[first_hart]; i++) {
		if (r->trigger_unique_id[i] != -1)

		riscv_set_register_on_hart(target, first_hart, GDB_REGNO_TSELECT, i);

		int result = riscv_get_register_on_hart(target, &tdata1, first_hart,
		if (result != ERROR_OK)
		int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));

		/* Program this slot on every enabled hart, dispatching on trigger type. */
		for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
			if (!riscv_hart_enabled(target, hartid))
			if (hartid > first_hart)
				riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
				result = maybe_add_trigger_t1(target, hartid, trigger, tdata1);
				result = maybe_add_trigger_t2(target, hartid, trigger, tdata1);
				LOG_DEBUG("trigger %d has unknown type %d", i, type);
			if (result != ERROR_OK)
		if (result != ERROR_OK)

		LOG_DEBUG("[%d] Using trigger %d (type %d) for bp %d", target->coreid,
				i, type, trigger->unique_id);
		r->trigger_unique_id[i] = trigger->unique_id;

	/* Restore the saved tselect values. */
	for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
		if (!riscv_hart_enabled(target, hartid))
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT,

	if (i >= r->trigger_count[first_hart]) {
		LOG_ERROR("Couldn't find an available hardware trigger.");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
/* target_type breakpoint hook.  Software breakpoints: save the original
 * instruction and overwrite it with (c.)ebreak.  Hardware breakpoints:
 * program a trigger via add_trigger(). */
int riscv_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
	LOG_DEBUG("[%d] @0x%" TARGET_PRIxADDR, target->coreid, breakpoint->address);

	if (breakpoint->type == BKPT_SOFT) {
		/** @todo check RVC for size/alignment */
		if (!(breakpoint->length == 4 || breakpoint->length == 2)) {
			LOG_ERROR("Invalid breakpoint length %d", breakpoint->length);

		if (0 != (breakpoint->address % 2)) {
			LOG_ERROR("Invalid breakpoint alignment for address 0x%" TARGET_PRIxADDR, breakpoint->address);

		/* Save the original instruction so remove_breakpoint can restore it. */
		if (target_read_memory(target, breakpoint->address, 2, breakpoint->length / 2,
					breakpoint->orig_instr) != ERROR_OK) {
			LOG_ERROR("Failed to read original instruction at 0x%" TARGET_PRIxADDR,
					breakpoint->address);

		/* Replace it with ebreak (4 bytes) or c.ebreak (2 bytes). */
		uint8_t buff[4] = { 0 };
		buf_set_u32(buff, 0, breakpoint->length * CHAR_BIT, breakpoint->length == 4 ? ebreak() : ebreak_c());
		int const retval = target_write_memory(target, breakpoint->address, 2, breakpoint->length / 2, buff);

		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to write %d-byte breakpoint instruction at 0x%"
					TARGET_PRIxADDR, breakpoint->length, breakpoint->address);

	} else if (breakpoint->type == BKPT_HARD) {
		struct trigger trigger;
		trigger_from_breakpoint(&trigger, breakpoint);
		int const result = add_trigger(target, &trigger);
		if (result != ERROR_OK)
		LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	breakpoint->set = true;
/* Release the hardware trigger slot that was programmed for `trigger` by
 * add_trigger(): find the slot by unique_id, clear its tdata1 on every
 * enabled hart (preserving each hart's tselect), and mark the slot free. */
static int remove_trigger(struct target *target, struct trigger *trigger)
	if (riscv_enumerate_triggers(target) != ERROR_OK)

	/* Find the first enabled hart. */
	for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
		if (!riscv_hart_enabled(target, hartid))
		if (first_hart < 0) {
	assert(first_hart >= 0);

	/* Locate the slot this trigger occupies. */
	for (i = 0; i < r->trigger_count[first_hart]; i++) {
		if (r->trigger_unique_id[i] == trigger->unique_id)
	if (i >= r->trigger_count[first_hart]) {
		LOG_ERROR("Couldn't find the hardware resources used by hardware "

	LOG_DEBUG("[%d] Stop using resource %d for bp %d", target->coreid, i,
	for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
		if (!riscv_hart_enabled(target, hartid))
		/* Save tselect, clear the trigger, restore tselect. */
		int result = riscv_get_register_on_hart(target, &tselect, hartid, GDB_REGNO_TSELECT);
		if (result != ERROR_OK)
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
	r->trigger_unique_id[i] = -1;
/* target_type breakpoint-removal hook: restore the saved instruction for a
 * software breakpoint, or free the trigger for a hardware one. */
int riscv_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
	if (breakpoint->type == BKPT_SOFT) {
		if (target_write_memory(target, breakpoint->address, 2, breakpoint->length / 2,
					breakpoint->orig_instr) != ERROR_OK) {
			LOG_ERROR("Failed to restore instruction for %d-byte breakpoint at "
					"0x%" TARGET_PRIxADDR, breakpoint->length, breakpoint->address);

	} else if (breakpoint->type == BKPT_HARD) {
		struct trigger trigger;
		trigger_from_breakpoint(&trigger, breakpoint);
		int result = remove_trigger(target, &trigger);
		if (result != ERROR_OK)
		LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	breakpoint->set = false;
/* Translate an OpenOCD watchpoint into the generic trigger description:
 * read and/or write (WPT_ACCESS sets both), never execute. */
static void trigger_from_watchpoint(struct trigger *trigger,
		const struct watchpoint *watchpoint)
	trigger->address = watchpoint->address;
	trigger->length = watchpoint->length;
	trigger->mask = watchpoint->mask;
	trigger->value = watchpoint->value;
	trigger->read = (watchpoint->rw == WPT_READ || watchpoint->rw == WPT_ACCESS);
	trigger->write = (watchpoint->rw == WPT_WRITE || watchpoint->rw == WPT_ACCESS);
	trigger->execute = false;
	/* unique_id is unique across both breakpoints and watchpoints. */
	trigger->unique_id = watchpoint->unique_id;
/* target_type watchpoint hook: program a hardware trigger for the watchpoint. */
int riscv_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
	struct trigger trigger;
	trigger_from_watchpoint(&trigger, watchpoint);

	int result = add_trigger(target, &trigger);
	if (result != ERROR_OK)
	watchpoint->set = true;
/* target_type watchpoint-removal hook: free the trigger backing the watchpoint. */
int riscv_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
	LOG_DEBUG("[%d] @0x%" TARGET_PRIxADDR, target->coreid, watchpoint->address);

	struct trigger trigger;
	trigger_from_watchpoint(&trigger, watchpoint);

	int result = remove_trigger(target, &trigger);
	if (result != ERROR_OK)
	watchpoint->set = false;
/* Sets *hit_watchpoint to the first watchpoint identified as causing the
 *
 * The GDB server uses this information to tell GDB what data address has
 * been hit, which enables GDB to print the hit variable along with its old
 *
 * Strategy: read DPC, fetch and partially disassemble the instruction there
 * (RV32I loads/stores only), compute the effective address, and match it
 * against the watchpoint list. */
int riscv_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
	struct watchpoint *wp = target->watchpoints;

	if (riscv_rtos_enabled(target))
		riscv_set_current_hartid(target, target->rtos->current_thread - 1);
	LOG_DEBUG("Current hartid = %d", riscv_current_hartid(target));

	/*TODO instead of disassembling the instruction that we think caused the
	 * trigger, check the hit bit of each watchpoint first. The hit bit is
	 * simpler and more reliable to check but as it is optional and relatively
	 * new, not all hardware will implement it */
	riscv_get_register(target, &dpc, GDB_REGNO_DPC);
	const uint8_t length = 4;
	LOG_DEBUG("dpc is 0x%" PRIx64, dpc);

	/* fetch the instruction at dpc */
	uint8_t buffer[length];
	if (target_read_buffer(target, dpc, length, buffer) != ERROR_OK) {
		LOG_ERROR("Failed to read instruction at dpc 0x%" PRIx64, dpc);

	/* Assemble the little-endian bytes into a 32-bit instruction word. */
	uint32_t instruction = 0;

	for (int i = 0; i < length; i++) {
		LOG_DEBUG("Next byte is %x", buffer[i]);
		instruction += (buffer[i] << 8 * i);
	LOG_DEBUG("Full instruction is %x", instruction);

	/* find out which memory address is accessed by the instruction at dpc */
	/* opcode is first 7 bits of the instruction */
	uint8_t opcode = instruction & 0x7F;
	riscv_reg_t mem_addr;

	/* MATCH_LB/MATCH_SB double as the major opcodes for all RV32I loads/stores. */
	if (opcode == MATCH_LB || opcode == MATCH_SB) {
		rs1 = (instruction & 0xf8000) >> 15;
		riscv_get_register(target, &mem_addr, rs1);

		if (opcode == MATCH_SB) {
			LOG_DEBUG("%x is store instruction", instruction);
			/* S-type immediate: imm[4:0] from bits 7..11, imm[11:5] from bits 25..31. */
			imm = ((instruction & 0xf80) >> 7) | ((instruction & 0xfe000000) >> 20);
			LOG_DEBUG("%x is load instruction", instruction);
			/* I-type immediate: imm[11:0] from bits 20..31. */
			imm = (instruction & 0xfff00000) >> 20;
		/* sign extend 12-bit imm to 16-bits */
		LOG_DEBUG("memory address=0x%" PRIx64, mem_addr);
		LOG_DEBUG("%x is not a RV32I load or store", instruction);

		/*TODO support length/mask */
		if (wp->address == mem_addr) {
			*hit_watchpoint = wp;
			LOG_DEBUG("Hit address=%" TARGET_PRIxADDR, wp->address);

	/* No match found - either we hit a watchpoint caused by an instruction that
	 * this function does not yet disassemble, or we hit a breakpoint.
	 *
	 * OpenOCD will behave as if this function had never been implemented i.e.
	 * report the halt to GDB with no address information. */
/* Delegate a single-step to the version-specific (0.11) driver. */
static int oldriscv_step(struct target *target, int current, uint32_t address,
		int handle_breakpoints)
	struct target_type *tt = get_target_type(target);
	return tt->step(target, current, address, handle_breakpoints);
/* Step entry point: harts with the new-style interface (r->is_halted set)
 * use riscv_openocd_step(); otherwise fall back to the legacy driver. */
static int old_or_new_riscv_step(struct target *target, int current,
		target_addr_t address, int handle_breakpoints)
	LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
	if (r->is_halted == NULL)
		return oldriscv_step(target, current, address, handle_breakpoints);
		return riscv_openocd_step(target, current, address, handle_breakpoints);
/* Examine hook: read dtmcontrol to learn the DTM spec version, then hand
 * off init/examine to the matching version-specific driver. */
static int riscv_examine(struct target *target)
	LOG_DEBUG("riscv_examine()");
	if (target_was_examined(target)) {
		LOG_DEBUG("Target was already examined.");

	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */

	riscv_info_t *info = (riscv_info_t *) target->arch_info;
	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	info->dtm_version = get_field(dtmcontrol, DTMCONTROL_VERSION);
	LOG_DEBUG("  version=0x%x", info->dtm_version);

	struct target_type *tt = get_target_type(target);

	int result = tt->init_target(info->cmd_ctx, target);
	if (result != ERROR_OK)

	return tt->examine(target);
/* Delegate polling to the version-specific driver. */
static int oldriscv_poll(struct target *target)
	struct target_type *tt = get_target_type(target);
	return tt->poll(target);
/* Poll entry point: new-style harts use riscv_openocd_poll(), legacy ones
 * the version-specific driver's poll. */
static int old_or_new_riscv_poll(struct target *target)
	if (r->is_halted == NULL)
		return oldriscv_poll(target);
		return riscv_openocd_poll(target);
/* Phase 1 of halting: run the driver's halt_prep on every enabled hart that
 * is not already halted, so the actual halt (halt_go) can happen as close to
 * simultaneously as possible. */
int halt_prep(struct target *target)
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		if (!riscv_hart_enabled(target, i))

		LOG_DEBUG("[%s] prep hart, debug_reason=%d", target_name(target),
				target->debug_reason);
		if (riscv_set_current_hartid(target, i) != ERROR_OK)
		if (riscv_is_halted(target)) {
			LOG_DEBUG("Hart %d is already halted (reason=%d).", i,
					target->debug_reason);
			if (r->halt_prep(target) != ERROR_OK)
/* Phase 2 of halting: issue halt_go on every enabled, not-yet-halted hart,
 * then invalidate the register cache since hart state has changed. */
int riscv_halt_go_all_harts(struct target *target)
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		if (!riscv_hart_enabled(target, i))

		if (riscv_set_current_hartid(target, i) != ERROR_OK)
		if (riscv_is_halted(target)) {
			LOG_DEBUG("Hart %d is already halted.", i);
			if (r->halt_go(target) != ERROR_OK)

	riscv_invalidate_register_cache(target);
/* Halt one target: legacy harts via the driver's halt(), new-style harts via
 * riscv_halt_go_all_harts().  Updates target state/debug_reason afterwards. */
int halt_go(struct target *target)
	riscv_info_t *r = riscv_info(target);
	if (r->is_halted == NULL) {
		struct target_type *tt = get_target_type(target);
		result = tt->halt(target);
		result = riscv_halt_go_all_harts(target);
	target->state = TARGET_HALTED;
	if (target->debug_reason == DBG_REASON_NOTHALTED)
		target->debug_reason = DBG_REASON_DBGRQ;
/* Phase 3 of halting: notify registered callbacks that the target halted. */
static int halt_finish(struct target *target)
	return target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Public halt entry point.  Legacy harts delegate to the driver.  New-style
 * targets run the three-phase prep/go/finish sequence -- across the whole
 * SMP group via target->head when present, otherwise on just this target --
 * and finally fix up the RTOS current-thread bookkeeping. */
int riscv_halt(struct target *target)
	if (r->is_halted == NULL) {
		struct target_type *tt = get_target_type(target);
		return tt->halt(target);

	LOG_DEBUG("[%d] halting all harts", target->coreid);

	int result = ERROR_OK;

	/* SMP: prep every member first, then halt, then fire events. */
	for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
		struct target *t = tlist->target;
		if (halt_prep(t) != ERROR_OK)
			result = ERROR_FAIL;

	for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
		struct target *t = tlist->target;
		riscv_info_t *i = riscv_info(t);
		if (halt_go(t) != ERROR_OK)
			result = ERROR_FAIL;

	for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
		struct target *t = tlist->target;
		if (halt_finish(t) != ERROR_OK)

	/* Non-SMP: same three phases on this target alone. */
	if (halt_prep(target) != ERROR_OK)
		result = ERROR_FAIL;
	if (halt_go(target) != ERROR_OK)
		result = ERROR_FAIL;
	if (halt_finish(target) != ERROR_OK)

	if (riscv_rtos_enabled(target)) {
		if (r->rtos_hartid != -1) {
			LOG_DEBUG("halt requested on RTOS hartid %d", r->rtos_hartid);
			/* GDB thread IDs are hartid + 1. */
			target->rtos->current_threadid = r->rtos_hartid + 1;
			target->rtos->current_thread = r->rtos_hartid + 1;
			LOG_DEBUG("halt requested, but no known RTOS hartid");
/* Assert reset via the version-specific driver; cached register values
 * cannot survive a reset, so invalidate them first. */
static int riscv_assert_reset(struct target *target)
	LOG_DEBUG("[%d]", target->coreid);
	struct target_type *tt = get_target_type(target);
	riscv_invalidate_register_cache(target);
	return tt->assert_reset(target);
/* Deassert reset via the version-specific driver. */
static int riscv_deassert_reset(struct target *target)
	LOG_DEBUG("[%d]", target->coreid);
	struct target_type *tt = get_target_type(target);
	return tt->deassert_reset(target);
/* Run the driver's resume_prep on every enabled hart that is currently
 * halted, so the subsequent resume can be near-simultaneous. */
int riscv_resume_prep_all_harts(struct target *target)
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		if (!riscv_hart_enabled(target, i))

		LOG_DEBUG("prep hart %d", i);
		if (riscv_set_current_hartid(target, i) != ERROR_OK)
		if (riscv_is_halted(target)) {
			if (r->resume_prep(target) != ERROR_OK)
			LOG_DEBUG("  hart %d requested resume, but was already resumed", i);

	LOG_DEBUG("[%d] mark as prepped", target->coreid);
/* Temporarily disable all triggers so the hart can step/run off one.
 * The previous configuration is recorded in `state` for enable_triggers().
 * state must be riscv_reg_t state[RISCV_MAX_HWBPS] = {0}; */
static int disable_triggers(struct target *target, riscv_reg_t *state)
	LOG_DEBUG("deal with triggers");

	if (riscv_enumerate_triggers(target) != ERROR_OK)

	int hartid = riscv_current_hartid(target);
	if (r->manual_hwbp_set) {
		/* Look at every trigger that may have been set. */
		riscv_reg_t tselect;
		if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
		for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
			if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
			if (riscv_get_register(target, &tdata1, GDB_REGNO_TDATA1) != ERROR_OK)
			/* Only touch debugger-owned (dmode) triggers. */
			if (tdata1 & MCONTROL_DMODE(riscv_xlen(target))) {
				if (riscv_set_register(target, GDB_REGNO_TDATA1, 0) != ERROR_OK)
		if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)

		/* Just go through the triggers we manage. */
		struct watchpoint *watchpoint = target->watchpoints;
		while (watchpoint) {
			LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
			/* Remember which watchpoints were armed so they can be re-armed. */
			state[i] = watchpoint->set;
			if (watchpoint->set) {
				if (riscv_remove_watchpoint(target, watchpoint) != ERROR_OK)
			watchpoint = watchpoint->next;
/* Re-enable the triggers recorded in `state` by disable_triggers(). */
static int enable_triggers(struct target *target, riscv_reg_t *state)
	int hartid = riscv_current_hartid(target);

	if (r->manual_hwbp_set) {
		/* Look at every trigger that may have been set. */
		riscv_reg_t tselect;
		if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
		for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
			if (state[t] != 0) {
				/* Restore the saved tdata1 into its slot. */
				if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
				if (riscv_set_register(target, GDB_REGNO_TDATA1, state[t]) != ERROR_OK)
		if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)

		struct watchpoint *watchpoint = target->watchpoints;
		while (watchpoint) {
			LOG_DEBUG("watchpoint %d: cleared=%" PRId64, i, state[i]);
			/* Re-arm the watchpoints disable_triggers() removed. */
			if (riscv_add_watchpoint(target, watchpoint) != ERROR_OK)
			watchpoint = watchpoint->next;
1256 * Get everything ready to resume.
/* First phase of the two-phase resume: optionally set the PC, step over a
 * watchpoint-triggered halt (with triggers disabled), then prep all harts.
 * NOTE(review): lossy extract — error-return lines between the ifs are
 * missing; code kept byte-identical. */
1258 static int resume_prep(struct target *target, int current,
1259 target_addr_t address, int handle_breakpoints, int debug_execution)
1262 LOG_DEBUG("[%d]", target->coreid);
/* Presumably guarded by !current in the missing line above — TODO confirm. */
1265 riscv_set_register(target, GDB_REGNO_PC, address);
1267 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1268 /* To be able to run off a trigger, disable all the triggers, step, and
1269 * then resume as usual. */
1270 riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
1272 if (disable_triggers(target, trigger_state) != ERROR_OK)
1275 if (old_or_new_riscv_step(target, true, 0, false) != ERROR_OK)
1278 if (enable_triggers(target, trigger_state) != ERROR_OK)
1283 if (riscv_resume_prep_all_harts(target) != ERROR_OK)
1287 LOG_DEBUG("[%d] mark as prepped", target->coreid);
1294 * Resume all the harts that have been prepped, as close to instantaneous as
/* Second phase of resume: actually let the prepped harts run. Falls back to
 * the legacy target-type resume when the new-style is_halted hook is absent
 * (i.e. 0.11 debug spec targets). */
1297 static int resume_go(struct target *target, int current,
1298 target_addr_t address, int handle_breakpoints, int debug_execution)
1300 riscv_info_t *r = riscv_info(target);
1302 if (r->is_halted == NULL) {
1303 struct target_type *tt = get_target_type(target);
1304 result = tt->resume(target, current, address, handle_breakpoints,
1307 result = riscv_resume_go_all_harts(target);
/* Final phase of resume: drop cached register values (the hart will change
 * them while running), update OpenOCD's state, and notify event listeners. */
1313 static int resume_finish(struct target *target)
1315 register_cache_invalidate(target->reg_cache);
1317 target->state = TARGET_RUNNING;
1318 target->debug_reason = DBG_REASON_NOTHALTED;
1319 return target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1323 * @par single_hart When true, only resume a single hart even if SMP is
1324 * configured. This is used to run algorithms on just one hart.
/* NOTE(review): the function-name line of this definition (presumably
 * "int riscv_resume(") is missing from this extract; parameter list and body
 * below kept byte-identical. Runs prep/go/finish across the SMP group, or on
 * the single target otherwise; any failure downgrades `result` but the
 * remaining phases still run. */
1327 struct target *target,
1329 target_addr_t address,
1330 int handle_breakpoints,
1331 int debug_execution,
1334 LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
1335 int result = ERROR_OK;
1336 if (target->smp && !single_hart) {
/* Phase 1: prep every hart in the SMP list before resuming any of them. */
1337 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1338 struct target *t = tlist->target;
1339 if (resume_prep(t, current, address, handle_breakpoints,
1340 debug_execution) != ERROR_OK)
1341 result = ERROR_FAIL;
/* Phase 2: start all prepped harts as close to simultaneously as possible. */
1344 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1345 struct target *t = tlist->target;
1346 riscv_info_t *i = riscv_info(t);
1348 if (resume_go(t, current, address, handle_breakpoints,
1349 debug_execution) != ERROR_OK)
1350 result = ERROR_FAIL;
/* Phase 3: bookkeeping and RESUMED events. */
1354 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1355 struct target *t = tlist->target;
1356 if (resume_finish(t) != ERROR_OK)
/* Non-SMP (or single_hart) path: same three phases on one target. */
1361 if (resume_prep(target, current, address, handle_breakpoints,
1362 debug_execution) != ERROR_OK)
1363 result = ERROR_FAIL;
1364 if (resume_go(target, current, address, handle_breakpoints,
1365 debug_execution) != ERROR_OK)
1366 result = ERROR_FAIL;
1367 if (resume_finish(target) != ERROR_OK)
/* target_type.resume entry point: plain resume, never single-hart. */
1374 static int riscv_target_resume(struct target *target, int current, target_addr_t address,
1375 int handle_breakpoints, int debug_execution)
1377 return riscv_resume(target, current, address, handle_breakpoints,
1378 debug_execution, false);
/* Point subsequent debug operations at the right hart: in -rtos riscv mode
 * the hart follows gdb's current thread (threadid is 1-based, hartid
 * 0-based); otherwise it is simply the target's coreid. */
1381 static int riscv_select_current_hart(struct target *target)
1384 if (riscv_rtos_enabled(target)) {
1385 if (r->rtos_hartid == -1)
1386 r->rtos_hartid = target->rtos->current_threadid - 1;
1387 return riscv_set_current_hartid(target, r->rtos_hartid);
1389 return riscv_set_current_hartid(target, target->coreid);
/* Report via *enabled whether address translation is active: off when
 * virt2phys is disabled by command, in (effective) M-mode, when SATP cannot
 * be read, or when satp.MODE is OFF.
 * NOTE(review): lossy extract — the *enabled assignments/returns between the
 * numbered lines are missing; code kept byte-identical. */
1392 static int riscv_mmu(struct target *target, int *enabled)
1394 if (!riscv_enable_virt2phys) {
1399 if (riscv_rtos_enabled(target))
1400 riscv_set_current_hartid(target, target->rtos->current_thread - 1);
1402 /* Don't use MMU in explicit or effective M (machine) mode */
1404 if (riscv_get_register(target, &priv, GDB_REGNO_PRIV) != ERROR_OK) {
1405 LOG_ERROR("Failed to read priv register.");
1409 riscv_reg_t mstatus;
1410 if (riscv_get_register(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) {
1411 LOG_ERROR("Failed to read mstatus register.");
/* With MPRV set, loads/stores use MPP's privilege, so check that instead. */
1415 if ((get_field(mstatus, MSTATUS_MPRV) ? get_field(mstatus, MSTATUS_MPP) : priv) == PRV_M) {
1416 LOG_DEBUG("SATP/MMU ignored in Machine mode (mstatus=0x%" PRIx64 ").", mstatus);
1422 if (riscv_get_register(target, &satp, GDB_REGNO_SATP) != ERROR_OK) {
1423 LOG_DEBUG("Couldn't read SATP.");
1424 /* If we can't read SATP, then there must not be an MMU. */
1429 if (get_field(satp, RISCV_SATP_MODE(riscv_xlen(target))) == SATP_MODE_OFF) {
1430 LOG_DEBUG("MMU is disabled.");
1433 LOG_DEBUG("MMU is enabled.");
/* Walk the page tables by hand to translate `virtual` into *physical.
 * Supports Sv32/Sv39/Sv48 as selected by satp.MODE; `info` supplies the
 * per-mode VPN/PPN shift and mask tables.
 * NOTE(review): lossy extract — case-body assignments, returns and loop
 * braces are missing between the numbered lines; code kept byte-identical. */
1440 static int riscv_address_translate(struct target *target,
1441 target_addr_t virtual, target_addr_t *physical)
1444 riscv_reg_t satp_value;
1447 target_addr_t table_address;
1448 virt2phys_info_t *info;
1452 if (riscv_rtos_enabled(target))
1453 riscv_set_current_hartid(target, target->rtos->current_thread - 1);
1455 int result = riscv_get_register(target, &satp_value, GDB_REGNO_SATP);
1456 if (result != ERROR_OK)
1459 unsigned xlen = riscv_xlen(target);
1460 mode = get_field(satp_value, RISCV_SATP_MODE(xlen));
/* Select the walk parameters for the active translation scheme. */
1462 case SATP_MODE_SV32:
1465 case SATP_MODE_SV39:
1468 case SATP_MODE_SV48:
1472 LOG_ERROR("No translation or protection." \
1473 " (satp: 0x%" PRIx64 ")", satp_value);
1476 LOG_ERROR("The translation mode is not supported." \
1477 " (satp: 0x%" PRIx64 ")", satp_value);
1480 LOG_DEBUG("virtual=0x%" TARGET_PRIxADDR "; mode=%s", virtual, info->name);
1482 /* verify bits xlen-1:va_bits-1 are all equal */
1483 target_addr_t mask = ((target_addr_t)1 << (xlen - (info->va_bits - 1))) - 1;
1484 target_addr_t masked_msbs = (virtual >> (info->va_bits - 1)) & mask;
1485 if (masked_msbs != 0 && masked_msbs != mask) {
1486 LOG_ERROR("Virtual address 0x%" TARGET_PRIxADDR " is not sign-extended "
1487 "for %s mode.", virtual, info->name);
/* Root page table address comes from satp.PPN. */
1491 ppn_value = get_field(satp_value, RISCV_SATP_PPN(xlen));
1492 table_address = ppn_value << RISCV_PGSHIFT;
1493 i = info->level - 1;
/* Walk from the highest level down until a leaf PTE is found. */
1495 uint64_t vpn = virtual >> info->vpn_shift[i];
1496 vpn &= info->vpn_mask[i];
1497 target_addr_t pte_address = table_address +
1498 (vpn << info->pte_shift);
1500 assert(info->pte_shift <= 3);
1501 int retval = r->read_memory(target, pte_address,
1502 4, (1 << info->pte_shift) / 4, buffer, 4);
1503 if (retval != ERROR_OK)
1506 if (info->pte_shift == 2)
1507 pte = buf_get_u32(buffer, 0, 32);
1509 pte = buf_get_u64(buffer, 0, 64);
1511 LOG_DEBUG("i=%d; PTE @0x%" TARGET_PRIxADDR " = 0x%" PRIx64, i,
/* Invalid PTE, or the reserved W-without-R encoding: translation fails. */
1514 if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W)))
1517 if ((pte & PTE_R) || (pte & PTE_X)) /* Found leaf PTE. */
/* Non-leaf: descend into the next-level table. */
1523 ppn_value = pte >> PTE_PPN_SHIFT;
1524 table_address = ppn_value << RISCV_PGSHIFT;
1528 LOG_ERROR("Couldn't find the PTE.");
1532 /* Make sure to clear out the high bits that may be set. */
1533 *physical = virtual & (((target_addr_t)1 << info->va_bits) - 1);
/* Splice the PPN fields from the leaf PTE into the physical address,
 * keeping the page-offset (and superpage low) bits from `virtual`. */
1535 while (i < info->level) {
1536 ppn_value = pte >> info->pte_ppn_shift[i];
1537 ppn_value &= info->pte_ppn_mask[i];
1538 *physical &= ~(((target_addr_t)info->pa_ppn_mask[i]) <<
1539 info->pa_ppn_shift[i]);
1540 *physical |= (ppn_value << info->pa_ppn_shift[i]);
1543 LOG_DEBUG("0x%" TARGET_PRIxADDR " -> 0x%" TARGET_PRIxADDR, virtual,
/* target_type.virt2phys hook: translate only when the MMU is on; otherwise
 * (missing lines presumably pass the address through unchanged — confirm). */
1549 static int riscv_virt2phys(struct target *target, target_addr_t virtual, target_addr_t *physical)
1552 if (riscv_mmu(target, &enabled) == ERROR_OK) {
1556 if (riscv_address_translate(target, virtual, physical) == ERROR_OK)
/* Read physical memory on the currently-selected hart, bypassing any
 * virtual-to-physical translation. */
1563 static int riscv_read_phys_memory(struct target *target, target_addr_t phys_address,
1564 uint32_t size, uint32_t count, uint8_t *buffer)
1567 if (riscv_select_current_hart(target) != ERROR_OK)
1569 return r->read_memory(target, phys_address, size, count, buffer, size);
/* Read target memory; if virt2phys succeeds the (virtual) address is first
 * translated to a physical one. Zero-length reads are only warned about. */
1572 static int riscv_read_memory(struct target *target, target_addr_t address,
1573 uint32_t size, uint32_t count, uint8_t *buffer)
1576 LOG_WARNING("0-length read from 0x%" TARGET_PRIxADDR, address);
1580 if (riscv_select_current_hart(target) != ERROR_OK)
1583 target_addr_t physical_addr;
1584 if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
1585 address = physical_addr;
1588 return r->read_memory(target, address, size, count, buffer, size);
/* Write physical memory on the currently-selected hart via the version-
 * specific target type (0.11 vs 0.13), with no address translation. */
1591 static int riscv_write_phys_memory(struct target *target, target_addr_t phys_address,
1592 uint32_t size, uint32_t count, const uint8_t *buffer)
1594 if (riscv_select_current_hart(target) != ERROR_OK)
1596 struct target_type *tt = get_target_type(target);
1597 return tt->write_memory(target, phys_address, size, count, buffer);
/* Write target memory, translating the address first when virt2phys
 * succeeds; mirrors riscv_read_memory. */
1600 static int riscv_write_memory(struct target *target, target_addr_t address,
1601 uint32_t size, uint32_t count, const uint8_t *buffer)
1604 LOG_WARNING("0-length write to 0x%" TARGET_PRIxADDR, address);
1608 if (riscv_select_current_hart(target) != ERROR_OK)
1611 target_addr_t physical_addr;
1612 if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
1613 address = physical_addr;
1615 struct target_type *tt = get_target_type(target);
1616 return tt->write_memory(target, address, size, count, buffer);
/* Build the register list gdb asked for: 33 regs (x0-x31 + pc) for
 * REG_CLASS_GENERAL, the whole cache otherwise. When `read` is true,
 * existing-but-invalid cached registers are fetched from the target.
 * NOTE(review): lossy extract — some returns/braces missing; code kept
 * byte-identical. */
1619 static int riscv_get_gdb_reg_list_internal(struct target *target,
1620 struct reg **reg_list[], int *reg_list_size,
1621 enum target_register_class reg_class, bool read)
1624 LOG_DEBUG("rtos_hartid=%d, current_hartid=%d, reg_class=%d, read=%d",
1625 r->rtos_hartid, r->current_hartid, reg_class, read);
1627 if (!target->reg_cache) {
1628 LOG_ERROR("Target not initialized. Return ERROR_FAIL.");
1632 if (riscv_select_current_hart(target) != ERROR_OK)
1635 switch (reg_class) {
1636 case REG_CLASS_GENERAL:
1637 *reg_list_size = 33;
1640 *reg_list_size = target->reg_cache->num_regs;
1643 LOG_ERROR("Unsupported reg_class: %d", reg_class);
/* Caller owns the returned array (array of pointers into the cache). */
1647 *reg_list = calloc(*reg_list_size, sizeof(struct reg *));
1651 for (int i = 0; i < *reg_list_size; i++) {
1652 assert(!target->reg_cache->reg_list[i].valid ||
1653 target->reg_cache->reg_list[i].size > 0);
1654 (*reg_list)[i] = &target->reg_cache->reg_list[i];
/* Refresh stale entries from hardware only when `read` was requested. */
1656 target->reg_cache->reg_list[i].exist &&
1657 !target->reg_cache->reg_list[i].valid) {
1658 if (target->reg_cache->reg_list[i].type->get(
1659 &target->reg_cache->reg_list[i]) != ERROR_OK)
/* Wrapper: build the gdb register list without touching the target. */
1667 static int riscv_get_gdb_reg_list_noread(struct target *target,
1668 struct reg **reg_list[], int *reg_list_size,
1669 enum target_register_class reg_class)
1671 return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
/* Wrapper: build the gdb register list, reading stale registers from the
 * target as needed. */
1675 static int riscv_get_gdb_reg_list(struct target *target,
1676 struct reg **reg_list[], int *reg_list_size,
1677 enum target_register_class reg_class)
1679 return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
/* Delegate arch-state display to the version-specific target type. */
1683 static int riscv_arch_state(struct target *target)
1685 struct target_type *tt = get_target_type(target);
1686 return tt->arch_state(target);
1689 /* Algorithm must end with a software breakpoint instruction. */
/* Run a code snippet on the target: save PC and the GPR parameters, disable
 * interrupts via mstatus, resume (single hart) at entry_point, poll until the
 * ebreak at the end halts the hart or timeout_ms expires, then verify the
 * final PC and restore mstatus, PC and all saved registers.
 * Memory parameters are rejected; only GPRs may be used as reg_params.
 * NOTE(review): lossy extract — some returns/braces between the numbered
 * lines are missing; apart from the mojibake fix noted below, code lines are
 * kept byte-identical. */
1690 static int riscv_run_algorithm(struct target *target, int num_mem_params,
1691 struct mem_param *mem_params, int num_reg_params,
1692 struct reg_param *reg_params, target_addr_t entry_point,
1693 target_addr_t exit_point, int timeout_ms, void *arch_info)
1695 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1696 int hartid = riscv_current_hartid(target);
1698 if (num_mem_params > 0) {
1699 LOG_ERROR("Memory parameters are not supported for RISC-V algorithms.");
1703 if (target->state != TARGET_HALTED) {
1704 LOG_WARNING("target not halted");
1705 return ERROR_TARGET_NOT_HALTED;
1708 /* Save registers */
1709 struct reg *reg_pc = register_get_by_name(target->reg_cache, "pc", 1);
1710 if (!reg_pc || reg_pc->type->get(reg_pc) != ERROR_OK)
1712 uint64_t saved_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
1713 LOG_DEBUG("saved_pc=0x%" PRIx64, saved_pc);
1715 uint64_t saved_regs[32];
1716 for (int i = 0; i < num_reg_params; i++) {
1717 LOG_DEBUG("save %s", reg_params[i].reg_name);
1718 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
1720 LOG_ERROR("Couldn't find register named '%s'", reg_params[i].reg_name);
1724 if (r->size != reg_params[i].size) {
1725 LOG_ERROR("Register %s is %d bits instead of %d bits.",
1726 reg_params[i].reg_name, r->size, reg_params[i].size);
1730 if (r->number > GDB_REGNO_XPR31) {
1731 LOG_ERROR("Only GPRs can be use as argument registers.");
1735 if (r->type->get(r) != ERROR_OK)
1737 saved_regs[r->number] = buf_get_u64(r->value, 0, r->size);
1739 if (reg_params[i].direction == PARAM_OUT || reg_params[i].direction == PARAM_IN_OUT) {
1740 if (r->type->set(r, reg_params[i].value) != ERROR_OK)
1746 /* Disable Interrupts before attempting to run the algorithm. */
1747 uint64_t current_mstatus;
1748 uint8_t mstatus_bytes[8] = { 0 };
1750 LOG_DEBUG("Disabling Interrupts");
1751 struct reg *reg_mstatus = register_get_by_name(target->reg_cache,
1754 LOG_ERROR("Couldn't find mstatus!");
1758 reg_mstatus->type->get(reg_mstatus);
1759 current_mstatus = buf_get_u64(reg_mstatus->value, 0, reg_mstatus->size);
1760 uint64_t ie_mask = MSTATUS_MIE | MSTATUS_HIE | MSTATUS_SIE | MSTATUS_UIE;
1761 buf_set_u64(mstatus_bytes, 0, info->xlen[0], set_field(current_mstatus,
1764 reg_mstatus->type->set(reg_mstatus, mstatus_bytes);
1767 LOG_DEBUG("resume at 0x%" TARGET_PRIxADDR, entry_point);
1768 if (riscv_resume(target, 0, entry_point, 0, 0, true) != ERROR_OK)
1771 int64_t start = timeval_ms();
1772 while (target->state != TARGET_HALTED) {
1773 LOG_DEBUG("poll()");
1774 int64_t now = timeval_ms();
1775 if (now - start > timeout_ms) {
1776 LOG_ERROR("Algorithm timed out after %" PRId64 " ms.", now - start);
1778 old_or_new_riscv_poll(target);
/* On timeout, dump the interesting registers to help debug the stuck
 * algorithm before giving up. */
1779 enum gdb_regno regnums[] = {
1780 GDB_REGNO_RA, GDB_REGNO_SP, GDB_REGNO_GP, GDB_REGNO_TP,
1781 GDB_REGNO_T0, GDB_REGNO_T1, GDB_REGNO_T2, GDB_REGNO_FP,
1782 GDB_REGNO_S1, GDB_REGNO_A0, GDB_REGNO_A1, GDB_REGNO_A2,
1783 GDB_REGNO_A3, GDB_REGNO_A4, GDB_REGNO_A5, GDB_REGNO_A6,
1784 GDB_REGNO_A7, GDB_REGNO_S2, GDB_REGNO_S3, GDB_REGNO_S4,
1785 GDB_REGNO_S5, GDB_REGNO_S6, GDB_REGNO_S7, GDB_REGNO_S8,
1786 GDB_REGNO_S9, GDB_REGNO_S10, GDB_REGNO_S11, GDB_REGNO_T3,
1787 GDB_REGNO_T4, GDB_REGNO_T5, GDB_REGNO_T6,
1789 GDB_REGNO_MSTATUS, GDB_REGNO_MEPC, GDB_REGNO_MCAUSE,
1791 for (unsigned i = 0; i < DIM(regnums); i++) {
1792 enum gdb_regno regno = regnums[i];
1793 riscv_reg_t reg_value;
/* FIX(review): original text read "®_value" -- mojibake for "&reg_value". */
1794 if (riscv_get_register(target, &reg_value, regno) != ERROR_OK)
1796 LOG_ERROR("%s = 0x%" PRIx64, gdb_regno_name(regno), reg_value);
1798 return ERROR_TARGET_TIMEOUT;
1801 int result = old_or_new_riscv_poll(target);
1802 if (result != ERROR_OK)
1806 /* The current hart id might have been changed in poll(). */
1807 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
1810 if (reg_pc->type->get(reg_pc) != ERROR_OK)
1812 uint64_t final_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
1813 if (exit_point && final_pc != exit_point) {
1814 LOG_ERROR("PC ended up at 0x%" PRIx64 " instead of 0x%"
1815 TARGET_PRIxADDR, final_pc, exit_point);
1819 /* Restore Interrupts */
1820 LOG_DEBUG("Restoring Interrupts");
1821 buf_set_u64(mstatus_bytes, 0, info->xlen[0], current_mstatus);
1822 reg_mstatus->type->set(reg_mstatus, mstatus_bytes);
1824 /* Restore registers */
1825 uint8_t buf[8] = { 0 };
1826 buf_set_u64(buf, 0, info->xlen[0], saved_pc);
1827 if (reg_pc->type->set(reg_pc, buf) != ERROR_OK)
1830 for (int i = 0; i < num_reg_params; i++) {
/* IN/IN_OUT params: read back the result for the caller first. */
1831 if (reg_params[i].direction == PARAM_IN ||
1832 reg_params[i].direction == PARAM_IN_OUT) {
1833 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
1834 if (r->type->get(r) != ERROR_OK) {
1835 LOG_ERROR("get(%s) failed", r->name);
1838 buf_cpy(r->value, reg_params[i].value, reg_params[i].size);
1840 LOG_DEBUG("restore %s", reg_params[i].reg_name);
1841 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
1842 buf_set_u64(buf, 0, info->xlen[0], saved_regs[r->number]);
1843 if (r->type->set(r, buf) != ERROR_OK) {
1844 LOG_ERROR("set(%s) failed", r->name);
1852 static int riscv_checksum_memory(struct target *target,
1853 target_addr_t address, uint32_t count,
1859 /*** OpenOCD Helper Functions ***/
/* Result of polling a single hart: no change, or a newly discovered
 * halted/running transition. (An error value presumably follows in the
 * missing lines — confirm against the full source.) */
1861 enum riscv_poll_hart {
1863 RPH_DISCOVERED_HALTED,
1864 RPH_DISCOVERED_RUNNING,
/* Poll one hart and reconcile its real halted/running state with what
 * OpenOCD last believed, reporting any transition it discovers. */
1867 static enum riscv_poll_hart riscv_poll_hart(struct target *target, int hartid)
1870 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
1873 LOG_DEBUG("polling hart %d, target->state=%d", hartid, target->state);
1875 /* If OpenOCD thinks we're running but this hart is halted then it's time
1876 * to raise an event. */
1877 bool halted = riscv_is_halted(target);
1878 if (target->state != TARGET_HALTED && halted) {
1879 LOG_DEBUG(" triggered a halt");
1881 return RPH_DISCOVERED_HALTED;
1882 } else if (target->state != TARGET_RUNNING && !halted) {
1883 LOG_DEBUG(" triggered running");
1884 target->state = TARGET_RUNNING;
1885 target->debug_reason = DBG_REASON_NOTHALTED;
1886 return RPH_DISCOVERED_RUNNING;
1889 return RPH_NO_CHANGE;
/* Map a RISC-V halt reason onto OpenOCD's generic debug_reason enum and
 * store it on the target. */
1892 int set_debug_reason(struct target *target, enum riscv_halt_reason halt_reason)
1894 switch (halt_reason) {
1895 case RISCV_HALT_BREAKPOINT:
1896 target->debug_reason = DBG_REASON_BREAKPOINT;
1898 case RISCV_HALT_TRIGGER:
1899 target->debug_reason = DBG_REASON_WATCHPOINT;
/* Both an external halt request and a halt-group halt report as DBGRQ. */
1901 case RISCV_HALT_INTERRUPT:
1902 case RISCV_HALT_GROUP:
1903 target->debug_reason = DBG_REASON_DBGRQ;
1905 case RISCV_HALT_SINGLESTEP:
1906 target->debug_reason = DBG_REASON_SINGLESTEP;
1908 case RISCV_HALT_UNKNOWN:
1909 target->debug_reason = DBG_REASON_UNDEFINED;
1911 case RISCV_HALT_ERROR:
1914 LOG_DEBUG("[%s] debug_reason=%d", target_name(target), target->debug_reason);
1918 /*** OpenOCD Interface ***/
/* Main poll entry point. Three modes:
 *  - rtos (-rtos riscv): scan every hart; when one halts, halt them all and
 *    point gdb's current thread at it;
 *  - SMP: poll each target in the group, tally which should stay halted vs
 *    resume (semihosting / halt groups), then halt or resume the whole group;
 *  - single hart: poll it and handle semihosting/events directly.
 * NOTE(review): lossy extract — several case labels, returns and braces are
 * missing between the numbered lines; code kept byte-identical. */
1919 int riscv_openocd_poll(struct target *target)
1921 LOG_DEBUG("polling all harts");
1922 int halted_hart = -1;
1923 if (riscv_rtos_enabled(target)) {
1924 /* Check every hart for an event. */
1925 for (int i = 0; i < riscv_count_harts(target); ++i) {
1926 enum riscv_poll_hart out = riscv_poll_hart(target, i);
1929 case RPH_DISCOVERED_RUNNING:
1931 case RPH_DISCOVERED_HALTED:
1938 if (halted_hart == -1) {
1939 LOG_DEBUG(" no harts just halted, target->state=%d", target->state);
1942 LOG_DEBUG(" hart %d halted", halted_hart);
1944 target->state = TARGET_HALTED;
1945 enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
1946 if (set_debug_reason(target, halt_reason) != ERROR_OK)
/* gdb thread ids are 1-based; hart ids are 0-based. */
1949 target->rtos->current_threadid = halted_hart + 1;
1950 target->rtos->current_thread = halted_hart + 1;
1951 riscv_set_rtos_hartid(target, halted_hart);
1953 /* If we're here then at least one hart triggered. That means we want
1954 * to go and halt _every_ hart (configured with -rtos riscv) in the
1955 * system, as that's the invariant we hold here. Some harts might have
1956 * already halted (as we're either in single-step mode or they also
1957 * triggered a breakpoint), so don't attempt to halt those harts.
1958 * riscv_halt() will do all that for us. */
1961 } else if (target->smp) {
1962 unsigned halts_discovered = 0;
1963 unsigned total_targets = 0;
1964 bool newly_halted[RISCV_MAX_HARTS] = {0};
1965 unsigned should_remain_halted = 0;
1966 unsigned should_resume = 0;
1968 for (struct target_list *list = target->head; list != NULL;
1969 list = list->next, i++) {
1971 struct target *t = list->target;
1972 riscv_info_t *r = riscv_info(t);
1973 assert(i < DIM(newly_halted));
1974 enum riscv_poll_hart out = riscv_poll_hart(t, r->current_hartid);
1978 case RPH_DISCOVERED_RUNNING:
1979 t->state = TARGET_RUNNING;
1980 t->debug_reason = DBG_REASON_NOTHALTED;
1982 case RPH_DISCOVERED_HALTED:
1984 newly_halted[i] = true;
1985 t->state = TARGET_HALTED;
1986 enum riscv_halt_reason halt_reason =
1987 riscv_halt_reason(t, r->current_hartid);
1988 if (set_debug_reason(t, halt_reason) != ERROR_OK)
/* Breakpoint halts may actually be semihosting requests; those are
 * serviced and the hart resumed rather than reported to gdb. */
1991 if (halt_reason == RISCV_HALT_BREAKPOINT) {
1993 switch (riscv_semihosting(t, &retval)) {
1996 /* This hart should remain halted. */
1997 should_remain_halted++;
2000 /* This hart should be resumed, along with any other
2001 * harts that halted due to haltgroups. */
2007 } else if (halt_reason != RISCV_HALT_GROUP) {
2008 should_remain_halted++;
2017 LOG_DEBUG("should_remain_halted=%d, should_resume=%d",
2018 should_remain_halted, should_resume);
2019 if (should_remain_halted && should_resume) {
2020 LOG_WARNING("%d harts should remain halted, and %d should resume.",
2021 should_remain_halted, should_resume);
/* Keep the group coherent: halt wins over resume when both apply. */
2023 if (should_remain_halted) {
2024 LOG_DEBUG("halt all");
2026 } else if (should_resume) {
2027 LOG_DEBUG("resume all");
2028 riscv_resume(target, true, 0, 0, 0, false);
/* Single-hart (non-rtos, non-SMP) path. */
2033 enum riscv_poll_hart out = riscv_poll_hart(target,
2034 riscv_current_hartid(target));
2035 if (out == RPH_NO_CHANGE || out == RPH_DISCOVERED_RUNNING)
2037 else if (out == RPH_ERROR)
2040 halted_hart = riscv_current_hartid(target);
2041 LOG_DEBUG(" hart %d halted", halted_hart);
2043 enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
2044 if (set_debug_reason(target, halt_reason) != ERROR_OK)
2046 target->state = TARGET_HALTED;
2049 if (target->debug_reason == DBG_REASON_BREAKPOINT) {
2051 switch (riscv_semihosting(target, &retval)) {
2054 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2057 if (riscv_resume(target, true, 0, 0, 0, false) != ERROR_OK)
2064 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Single-step the (rtos) hart: optionally set PC, disable triggers so a
 * trigger at the current PC doesn't immediately re-fire, step, restore the
 * triggers, then report the RESUMED/HALTED event pair gdb expects. */
2070 int riscv_openocd_step(struct target *target, int current,
2071 target_addr_t address, int handle_breakpoints)
2073 LOG_DEBUG("stepping rtos hart");
/* Presumably guarded by !current in a missing line — TODO confirm. */
2076 riscv_set_register(target, GDB_REGNO_PC, address);
2078 riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
2079 if (disable_triggers(target, trigger_state) != ERROR_OK)
2082 int out = riscv_step_rtos_hart(target);
2083 if (out != ERROR_OK) {
2084 LOG_ERROR("unable to step rtos hart");
2088 register_cache_invalidate(target->reg_cache);
2090 if (enable_triggers(target, trigger_state) != ERROR_OK)
2093 target->state = TARGET_RUNNING;
2094 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
2095 target->state = TARGET_HALTED;
2096 target->debug_reason = DBG_REASON_SINGLESTEP;
2097 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2101 /* Command Handlers */
/* `riscv set_command_timeout_sec <n>`: wall-clock timeout for individual
 * debug operations. Rejects non-positive / unparsable values (the check
 * itself is in a missing line of this extract). */
2102 COMMAND_HANDLER(riscv_set_command_timeout_sec)
2104 if (CMD_ARGC != 1) {
2105 LOG_ERROR("Command takes exactly 1 parameter");
2106 return ERROR_COMMAND_SYNTAX_ERROR;
2108 int timeout = atoi(CMD_ARGV[0]);
2110 LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
2114 riscv_command_timeout_sec = timeout;
/* `riscv set_reset_timeout_sec <n>`: timeout after reset deassert. */
2119 COMMAND_HANDLER(riscv_set_reset_timeout_sec)
2121 if (CMD_ARGC != 1) {
2122 LOG_ERROR("Command takes exactly 1 parameter");
2123 return ERROR_COMMAND_SYNTAX_ERROR;
2125 int timeout = atoi(CMD_ARGV[0]);
2127 LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
2131 riscv_reset_timeout_sec = timeout;
/* `riscv test_compliance`: run the debug-spec compliance suite if the
 * version-specific backend provides one. */
2135 COMMAND_HANDLER(riscv_test_compliance) {
2137 struct target *target = get_current_target(CMD_CTX);
2142 LOG_ERROR("Command does not take any parameters.");
2143 return ERROR_COMMAND_SYNTAX_ERROR;
2146 if (r->test_compliance) {
2147 return r->test_compliance(target);
2149 LOG_ERROR("This target does not support this command (may implement an older version of the spec).");
/* `riscv set_prefer_sba on|off`: prefer System Bus Access over the Program
 * Buffer for memory accesses. */
2154 COMMAND_HANDLER(riscv_set_prefer_sba)
2156 if (CMD_ARGC != 1) {
2157 LOG_ERROR("Command takes exactly 1 parameter");
2158 return ERROR_COMMAND_SYNTAX_ERROR;
2160 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_prefer_sba);
/* `riscv set_enable_virtual on|off`: make memory accesses honor the current
 * virtual-memory configuration. */
2164 COMMAND_HANDLER(riscv_set_enable_virtual)
2166 if (CMD_ARGC != 1) {
2167 LOG_ERROR("Command takes exactly 1 parameter");
2168 return ERROR_COMMAND_SYNTAX_ERROR;
2170 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virtual);
/* Print a three-line parse diagnostic: the offending character, the input
 * string, and a caret positioned under the failure point. */
2174 void parse_error(const char *string, char c, unsigned position)
2176 char buf[position+2];
/* The loop body (filling with spaces, presumably) is in a missing line. */
2177 for (unsigned i = 0; i < position; i++)
2179 buf[position] = '^';
2180 buf[position + 1] = 0;
2182 LOG_ERROR("Parse error at character %c in:", c);
2183 LOG_ERROR("%s", string);
2184 LOG_ERROR("%s", buf);
/* Parse a "n0[-m0][,n1[-m1]]..." range list (as used by expose_csrs /
 * expose_custom) into a terminated range_t array. Two passes: pass 0 counts
 * ranges, pass 1 (after allocation) fills them in. The list is terminated by
 * a {low=1, high=0} sentinel.
 * NOTE(review): lossy extract — digit accumulation and state transitions are
 * in missing lines; code kept byte-identical. */
2187 int parse_ranges(range_t **ranges, const char **argv)
2189 for (unsigned pass = 0; pass < 2; pass++) {
2192 bool parse_low = true;
/* Iterate one past the end so the terminating NUL flushes the last range. */
2194 for (unsigned i = 0; i == 0 || argv[0][i-1]; i++) {
2195 char c = argv[0][i];
2197 /* Ignore whitespace. */
2205 } else if (c == '-') {
/* ',' or end-of-string closes a single-value range (high == low). */
2207 } else if (c == ',' || c == 0) {
2209 (*ranges)[range].low = low;
2210 (*ranges)[range].high = low;
2215 parse_error(argv[0], c, i);
2216 return ERROR_COMMAND_SYNTAX_ERROR;
/* Same, but closing an explicit low-high range. */
2223 } else if (c == ',' || c == 0) {
2226 (*ranges)[range].low = low;
2227 (*ranges)[range].high = high;
2233 parse_error(argv[0], c, i);
2234 return ERROR_COMMAND_SYNTAX_ERROR;
/* After the counting pass: allocate ranges + sentinel slot. */
2241 *ranges = calloc(range + 2, sizeof(range_t));
2245 (*ranges)[range].low = 1;
2246 (*ranges)[range].high = 0;
/* `riscv expose_csrs <ranges>`: extra CSR numbers to expose to gdb; must run
 * before `init`. */
2253 COMMAND_HANDLER(riscv_set_expose_csrs)
2255 if (CMD_ARGC != 1) {
2256 LOG_ERROR("Command takes exactly 1 parameter");
2257 return ERROR_COMMAND_SYNTAX_ERROR;
2260 return parse_ranges(&expose_csr, CMD_ARGV);
/* `riscv expose_custom <ranges>`: custom-register ranges to expose (custom0
 * maps to abstract register 0xc000); must run before `init`. */
2263 COMMAND_HANDLER(riscv_set_expose_custom)
2265 if (CMD_ARGC != 1) {
2266 LOG_ERROR("Command takes exactly 1 parameter");
2267 return ERROR_COMMAND_SYNTAX_ERROR;
2270 return parse_ranges(&expose_custom, CMD_ARGV);
/* `riscv authdata_read`: print the 32-bit authdata value, when the backend
 * implements debug-authentication. */
2273 COMMAND_HANDLER(riscv_authdata_read)
2275 if (CMD_ARGC != 0) {
2276 LOG_ERROR("Command takes no parameters");
2277 return ERROR_COMMAND_SYNTAX_ERROR;
2280 struct target *target = get_current_target(CMD_CTX);
2282 LOG_ERROR("target is NULL!");
2288 LOG_ERROR("riscv_info is NULL!");
2292 if (r->authdata_read) {
2294 if (r->authdata_read(target, &value) != ERROR_OK)
2296 command_print(CMD, "0x%" PRIx32, value);
2299 LOG_ERROR("authdata_read is not implemented for this target.");
/* `riscv authdata_write <value>`: write 32 bits to authdata. */
2304 COMMAND_HANDLER(riscv_authdata_write)
2306 if (CMD_ARGC != 1) {
2307 LOG_ERROR("Command takes exactly 1 argument");
2308 return ERROR_COMMAND_SYNTAX_ERROR;
2311 struct target *target = get_current_target(CMD_CTX);
2315 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], value);
2317 if (r->authdata_write) {
2318 return r->authdata_write(target, value);
2320 LOG_ERROR("authdata_write is not implemented for this target.");
/* `riscv dmi_read <address>`: raw 32-bit Debug Module Interface read. */
2325 COMMAND_HANDLER(riscv_dmi_read)
2327 if (CMD_ARGC != 1) {
2328 LOG_ERROR("Command takes 1 parameter");
2329 return ERROR_COMMAND_SYNTAX_ERROR;
2332 struct target *target = get_current_target(CMD_CTX);
2334 LOG_ERROR("target is NULL!");
2340 LOG_ERROR("riscv_info is NULL!");
2345 uint32_t address, value;
2346 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2347 if (r->dmi_read(target, &value, address) != ERROR_OK)
2349 command_print(CMD, "0x%" PRIx32, value);
2352 LOG_ERROR("dmi_read is not implemented for this target.");
/* `riscv dmi_write <address> <value>`: raw 32-bit DMI write. */
2358 COMMAND_HANDLER(riscv_dmi_write)
2360 if (CMD_ARGC != 2) {
2361 LOG_ERROR("Command takes exactly 2 arguments");
2362 return ERROR_COMMAND_SYNTAX_ERROR;
2365 struct target *target = get_current_target(CMD_CTX);
2368 uint32_t address, value;
2369 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2370 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2373 return r->dmi_write(target, address, value);
2375 LOG_ERROR("dmi_write is not implemented for this target.");
/* `riscv test_sba_config_reg <legal_addr> <num_words> <illegal_addr> <on|off>`:
 * run the backend's SBCS (system-bus access) self-test, if implemented. */
2380 COMMAND_HANDLER(riscv_test_sba_config_reg)
2382 if (CMD_ARGC != 4) {
2383 LOG_ERROR("Command takes exactly 4 arguments");
2384 return ERROR_COMMAND_SYNTAX_ERROR;
2387 struct target *target = get_current_target(CMD_CTX);
2390 target_addr_t legal_address;
2392 target_addr_t illegal_address;
2393 bool run_sbbusyerror_test;
2395 COMMAND_PARSE_NUMBER(target_addr, CMD_ARGV[0], legal_address);
2396 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], num_words);
2397 COMMAND_PARSE_NUMBER(target_addr, CMD_ARGV[2], illegal_address);
2398 COMMAND_PARSE_ON_OFF(CMD_ARGV[3], run_sbbusyerror_test);
2400 if (r->test_sba_config_reg) {
2401 return r->test_sba_config_reg(target, legal_address, num_words,
2402 illegal_address, run_sbbusyerror_test);
2404 LOG_ERROR("test_sba_config_reg is not implemented for this target.");
/* `riscv reset_delays [wait]`: force re-learning of Run-Test/Idle delay
 * counts after `wait` scans (testing aid). */
2409 COMMAND_HANDLER(riscv_reset_delays)
2414 LOG_ERROR("Command takes at most one argument");
2415 return ERROR_COMMAND_SYNTAX_ERROR;
2419 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], wait);
2421 struct target *target = get_current_target(CMD_CTX);
2423 r->reset_delays_wait = wait;
/* `riscv set_ir idcode|dtmcs|dmi <value>`: override the JTAG IR value used
 * for one of the three DTM registers. */
2427 COMMAND_HANDLER(riscv_set_ir)
2429 if (CMD_ARGC != 2) {
2430 LOG_ERROR("Command takes exactly 2 arguments");
2431 return ERROR_COMMAND_SYNTAX_ERROR;
2435 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2437 if (!strcmp(CMD_ARGV[0], "idcode"))
2438 buf_set_u32(ir_idcode, 0, 32, value);
2439 else if (!strcmp(CMD_ARGV[0], "dtmcs"))
2440 buf_set_u32(ir_dtmcontrol, 0, 32, value);
2441 else if (!strcmp(CMD_ARGV[0], "dmi"))
2442 buf_set_u32(ir_dbus, 0, 32, value);
/* `riscv resume_order normal|reversed`: order in which SMP harts are
 * resumed. */
2449 COMMAND_HANDLER(riscv_resume_order)
2452 LOG_ERROR("Command takes at most one argument");
2453 return ERROR_COMMAND_SYNTAX_ERROR;
2456 if (!strcmp(CMD_ARGV[0], "normal")) {
2457 resume_order = RO_NORMAL;
2458 } else if (!strcmp(CMD_ARGV[0], "reversed")) {
2459 resume_order = RO_REVERSED;
2461 LOG_ERROR("Unsupported resume order: %s", CMD_ARGV[0]);
/* `riscv use_bscan_tunnel <ir_width> [type]`: tunnel DMI accesses through a
 * boundary-scan chain; type selects nested-TAP (default) or plain data-
 * register tunneling. Unknown types fall back to nested-TAP with a notice. */
2468 COMMAND_HANDLER(riscv_use_bscan_tunnel)
2471 int tunnel_type = BSCAN_TUNNEL_NESTED_TAP;
2474 LOG_ERROR("Command takes at most two arguments");
2475 return ERROR_COMMAND_SYNTAX_ERROR;
2476 } else if (CMD_ARGC == 1) {
2477 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
2478 } else if (CMD_ARGC == 2) {
2479 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
2480 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], tunnel_type);
2482 if (tunnel_type == BSCAN_TUNNEL_NESTED_TAP)
2483 LOG_INFO("Nested Tap based Bscan Tunnel Selected");
2484 else if (tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
2485 LOG_INFO("Simple Register based Bscan Tunnel Selected");
2487 LOG_INFO("Invalid Tunnel type selected ! : selecting default Nested Tap Type");
2489 bscan_tunnel_type = tunnel_type;
2490 bscan_tunnel_ir_width = irwidth;
/* `riscv set_enable_virt2phys on|off`: gate the page-table walk performed by
 * riscv_mmu()/riscv_address_translate(). */
2494 COMMAND_HANDLER(riscv_set_enable_virt2phys)
2496 if (CMD_ARGC != 1) {
2497 LOG_ERROR("Command takes exactly 1 parameter");
2498 return ERROR_COMMAND_SYNTAX_ERROR;
2500 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virt2phys);
/* `riscv set_ebreakm on|off`: make M-mode ebreak trap to the debugger. */
2504 COMMAND_HANDLER(riscv_set_ebreakm)
2506 if (CMD_ARGC != 1) {
2507 LOG_ERROR("Command takes exactly 1 parameter");
2508 return ERROR_COMMAND_SYNTAX_ERROR;
2510 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreakm);
/* `riscv set_ebreaks on|off`: same for S-mode ebreak. */
2514 COMMAND_HANDLER(riscv_set_ebreaks)
2516 if (CMD_ARGC != 1) {
2517 LOG_ERROR("Command takes exactly 1 parameter");
2518 return ERROR_COMMAND_SYNTAX_ERROR;
2520 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaks);
/* `riscv set_ebreaku on|off`: same for U-mode ebreak. */
2524 COMMAND_HANDLER(riscv_set_ebreaku)
2526 if (CMD_ARGC != 1) {
2527 LOG_ERROR("Command takes exactly 1 parameter");
2528 return ERROR_COMMAND_SYNTAX_ERROR;
2530 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaku);
2534 static const struct command_registration riscv_exec_command_handlers[] = {
2536 .name = "test_compliance",
2537 .handler = riscv_test_compliance,
2539 .mode = COMMAND_EXEC,
2540 .help = "Runs a basic compliance test suite against the RISC-V Debug Spec."
2543 .name = "set_command_timeout_sec",
2544 .handler = riscv_set_command_timeout_sec,
2545 .mode = COMMAND_ANY,
2547 .help = "Set the wall-clock timeout (in seconds) for individual commands"
2550 .name = "set_reset_timeout_sec",
2551 .handler = riscv_set_reset_timeout_sec,
2552 .mode = COMMAND_ANY,
2554 .help = "Set the wall-clock timeout (in seconds) after reset is deasserted"
2557 .name = "set_prefer_sba",
2558 .handler = riscv_set_prefer_sba,
2559 .mode = COMMAND_ANY,
2561 .help = "When on, prefer to use System Bus Access to access memory. "
2562 "When off (default), prefer to use the Program Buffer to access memory."
2565 .name = "set_enable_virtual",
2566 .handler = riscv_set_enable_virtual,
2567 .mode = COMMAND_ANY,
2569 .help = "When on, memory accesses are performed on physical or virtual "
2570 "memory depending on the current system configuration. "
2571 "When off (default), all memory accessses are performed on physical memory."
2574 .name = "expose_csrs",
2575 .handler = riscv_set_expose_csrs,
2576 .mode = COMMAND_ANY,
2577 .usage = "n0[-m0][,n1[-m1]]...",
2578 .help = "Configure a list of inclusive ranges for CSRs to expose in "
2579 "addition to the standard ones. This must be executed before "
2583 .name = "expose_custom",
2584 .handler = riscv_set_expose_custom,
2585 .mode = COMMAND_ANY,
2586 .usage = "n0[-m0][,n1[-m1]]...",
2587 .help = "Configure a list of inclusive ranges for custom registers to "
2588 "expose. custom0 is accessed as abstract register number 0xc000, "
2589 "etc. This must be executed before `init`."
2592 .name = "authdata_read",
2593 .handler = riscv_authdata_read,
2595 .mode = COMMAND_ANY,
2596 .help = "Return the 32-bit value read from authdata."
2599 .name = "authdata_write",
2600 .handler = riscv_authdata_write,
2601 .mode = COMMAND_ANY,
2603 .help = "Write the 32-bit value to authdata."
2607 .handler = riscv_dmi_read,
2608 .mode = COMMAND_ANY,
2610 .help = "Perform a 32-bit DMI read at address, returning the value."
2613 .name = "dmi_write",
2614 .handler = riscv_dmi_write,
2615 .mode = COMMAND_ANY,
2616 .usage = "address value",
2617 .help = "Perform a 32-bit DMI write of value at address."
2620 .name = "test_sba_config_reg",
2621 .handler = riscv_test_sba_config_reg,
2622 .mode = COMMAND_ANY,
2623 .usage = "legal_address num_words "
2624 "illegal_address run_sbbusyerror_test[on/off]",
2625 .help = "Perform a series of tests on the SBCS register. "
2626 "Inputs are a legal, 128-byte aligned address and a number of words to "
2627 "read/write starting at that address (i.e., address range [legal address, "
2628 "legal_address+word_size*num_words) must be legally readable/writable), "
2629 "an illegal, 128-byte aligned address for error flag/handling cases, "
2630 "and whether sbbusyerror test should be run."
2633 .name = "reset_delays",
2634 .handler = riscv_reset_delays,
2635 .mode = COMMAND_ANY,
2637 .help = "OpenOCD learns how many Run-Test/Idle cycles are required "
2638 "between scans to avoid encountering the target being busy. This "
2639 "command resets those learned values after `wait` scans. It's only "
2640 "useful for testing OpenOCD itself."
2643 .name = "resume_order",
2644 .handler = riscv_resume_order,
2645 .mode = COMMAND_ANY,
2646 .usage = "normal|reversed",
2647 .help = "Choose the order that harts are resumed in when `hasel` is not "
2648 "supported. Normal order is from lowest hart index to highest. "
2649 "Reversed order is from highest hart index to lowest."
2653 .handler = riscv_set_ir,
2654 .mode = COMMAND_ANY,
2655 .usage = "[idcode|dtmcs|dmi] value",
2656 .help = "Set IR value for specified JTAG register."
2659 .name = "use_bscan_tunnel",
2660 .handler = riscv_use_bscan_tunnel,
2661 .mode = COMMAND_ANY,
2662 .usage = "value [type]",
2663 .help = "Enable or disable use of a BSCAN tunnel to reach DM. Supply "
2664 "the width of the DM transport TAP's instruction register to "
2665 "enable. Supply a value of 0 to disable. Pass A second argument "
2666 "(optional) to indicate Bscan Tunnel Type {0:(default) NESTED_TAP , "
2670 .name = "set_enable_virt2phys",
2671 .handler = riscv_set_enable_virt2phys,
2672 .mode = COMMAND_ANY,
2674 .help = "When on (default), enable translation from virtual address to "
2678 .name = "set_ebreakm",
2679 .handler = riscv_set_ebreakm,
2680 .mode = COMMAND_ANY,
2682 .help = "Control dcsr.ebreakm. When off, M-mode ebreak instructions "
2683 "don't trap to OpenOCD. Defaults to on."
2686 .name = "set_ebreaks",
2687 .handler = riscv_set_ebreaks,
2688 .mode = COMMAND_ANY,
2690 .help = "Control dcsr.ebreaks. When off, S-mode ebreak instructions "
2691 "don't trap to OpenOCD. Defaults to on."
2694 .name = "set_ebreaku",
2695 .handler = riscv_set_ebreaku,
2696 .mode = COMMAND_ANY,
2698 .help = "Control dcsr.ebreaku. When off, U-mode ebreak instructions "
2699 "don't trap to OpenOCD. Defaults to on."
2701 COMMAND_REGISTRATION_DONE
2705 * To be noted that RISC-V targets use the same semihosting commands as
2708 * The main reason is compatibility with existing tools. For example the
2709 * Eclipse OpenOCD/SEGGER J-Link/QEMU plug-ins have several widgets to
2710 * configure semihosting, which generate commands like `arm semihosting
2712 * A secondary reason is the fact that the protocol used is exactly the
2713 * one specified by ARM. If RISC-V will ever define its own semihosting
2714 * protocol, then a command like `riscv semihosting enable` will make
2715 * sense, but for now all semihosting commands are prefixed with `arm`.
2717 extern const struct command_registration semihosting_common_handlers[];
/* Top-level command registration for RISC-V targets: the `riscv` group
 * chains the exec-level handlers above, and the `arm` group chains the
 * shared semihosting handlers (see the comment above for why the ARM
 * prefix is reused).
 * NOTE(review): some interior lines (entry braces, .name fields) are
 * missing from this extract. */
2719 const struct command_registration riscv_command_handlers[] = {
2722 .mode = COMMAND_ANY,
2723 .help = "RISC-V Command Group",
2725 .chain = riscv_exec_command_handlers
2729 .mode = COMMAND_ANY,
2730 .help = "ARM Command Group",
2732 .chain = semihosting_common_handlers
2734 COMMAND_REGISTRATION_DONE
/* Non-const wrapper so riscv_xlen() (which takes a const target) can be
 * used as the target_type address_bits callback below, whose signature
 * takes a non-const struct target *. */
static unsigned riscv_xlen_nonconst(struct target *target)
{
	return riscv_xlen(target);
}
/* OpenOCD target_type vtable for RISC-V targets.  Most callbacks dispatch
 * further based on the discovered DTM version (0.11 vs 0.13).
 * NOTE(review): several fields (e.g. .name, .halt) are missing from this
 * extract. */
2742 struct target_type riscv_target = {
2745 .init_target = riscv_init_target,
2746 .deinit_target = riscv_deinit_target,
2747 .examine = riscv_examine,
2749 /* poll current target status */
2750 .poll = old_or_new_riscv_poll,
2753 .resume = riscv_target_resume,
2754 .step = old_or_new_riscv_step,
2756 .assert_reset = riscv_assert_reset,
2757 .deassert_reset = riscv_deassert_reset,
/* Memory access, both virtual and physical address flavors. */
2759 .read_memory = riscv_read_memory,
2760 .write_memory = riscv_write_memory,
2761 .read_phys_memory = riscv_read_phys_memory,
2762 .write_phys_memory = riscv_write_phys_memory,
2764 .checksum_memory = riscv_checksum_memory,
2767 .virt2phys = riscv_virt2phys,
/* gdb register-list plumbing. */
2769 .get_gdb_reg_list = riscv_get_gdb_reg_list,
2770 .get_gdb_reg_list_noread = riscv_get_gdb_reg_list_noread,
/* Breakpoints and watchpoints. */
2772 .add_breakpoint = riscv_add_breakpoint,
2773 .remove_breakpoint = riscv_remove_breakpoint,
2775 .add_watchpoint = riscv_add_watchpoint,
2776 .remove_watchpoint = riscv_remove_watchpoint,
2777 .hit_watchpoint = riscv_hit_watchpoint,
2779 .arch_state = riscv_arch_state,
2781 .run_algorithm = riscv_run_algorithm,
2783 .commands = riscv_command_handlers,
/* address_bits lets the upper layers size addresses to XLEN. */
2785 .address_bits = riscv_xlen_nonconst,
2788 /*** RISC-V Interface ***/
/* Initialize the per-target riscv_info_t: zero the whole structure, mark
 * the register cache as not yet set up, make target->coreid the current
 * hart, and reset per-trigger / per-hart bookkeeping.
 * NOTE(review): interior lines of this body are missing from this
 * extract; the statements below are a partial listing. */
2790 void riscv_info_init(struct target *target, riscv_info_t *r)
2792 memset(r, 0, sizeof(*r));
/* Fields that must not be left at their zeroed value. */
2794 r->registers_initialized = false;
2795 r->current_hartid = target->coreid;
/* 0xff marks every trigger slot as not in use. */
2797 memset(r->trigger_unique_id, 0xff, sizeof(r->trigger_unique_id));
2799 for (size_t h = 0; h < RISCV_MAX_HARTS; ++h)
/* Resume every enabled, halted hart on this target, in the order chosen by
 * the `riscv resume_order` command (normal: lowest hart index first;
 * reversed: highest first).  Harts that are already running are skipped
 * with a debug message.  Invalidates the register cache before returning.
 * NOTE(review): interior lines (first/last/step assignments, error
 * returns, closing braces) are missing from this extract. */
2803 static int riscv_resume_go_all_harts(struct target *target)
2807 /* Dummy variables to make mingw32-gcc happy. */
/* Pick iteration direction based on the configured resume order. */
2811 switch (resume_order) {
2814 last = riscv_count_harts(target) - 1;
2818 first = riscv_count_harts(target) - 1;
/* Walk harts from `first` to `last` inclusive, stepping +1 or -1. */
2826 for (int i = first; i != last + step; i += step) {
2827 if (!riscv_hart_enabled(target, i))
2830 LOG_DEBUG("resuming hart %d", i);
2831 if (riscv_set_current_hartid(target, i) != ERROR_OK)
2833 if (riscv_is_halted(target)) {
2834 if (r->resume_go(target) != ERROR_OK)
2837 LOG_DEBUG(" hart %d requested resume, but was already resumed", i);
/* Resumed harts may change state behind our back; drop cached values. */
2841 riscv_invalidate_register_cache(target);
/* Single-step one hart.  Under an RTOS the hart to step is taken from
 * r->rtos_hartid (with "any thread" mapping to hart 0); otherwise the
 * current hart is stepped.  The hart must be halted on entry and is
 * expected to be halted again on exit.
 * NOTE(review): interior lines (error returns, braces, final return) are
 * missing from this extract. */
2845 int riscv_step_rtos_hart(struct target *target)
2848 int hartid = r->current_hartid;
2849 if (riscv_rtos_enabled(target)) {
2850 hartid = r->rtos_hartid;
/* gdb's "step any thread" request (-1) falls back to hart 0. */
2852 LOG_DEBUG("GDB has asked me to step \"any\" thread, so I'm stepping hart 0.");
2856 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
2858 LOG_DEBUG("stepping hart %d", hartid);
/* Stepping a running hart would be meaningless; bail out loudly. */
2860 if (!riscv_is_halted(target)) {
2861 LOG_ERROR("Hart isn't halted before single step!");
/* Invalidate before AND after the step: register values change. */
2864 riscv_invalidate_register_cache(target);
2866 if (r->step_current_hart(target) != ERROR_OK)
2868 riscv_invalidate_register_cache(target);
2870 if (!riscv_is_halted(target)) {
2871 LOG_ERROR("Hart was not halted after single step!");
2877 bool riscv_supports_extension(struct target *target, int hartid, char letter)
2881 if (letter >= 'a' && letter <= 'z')
2883 else if (letter >= 'A' && letter <= 'Z')
2887 return r->misa[hartid] & (1 << num);
2890 unsigned riscv_xlen(const struct target *target)
2892 return riscv_xlen_of_hart(target, riscv_current_hartid(target));
2895 int riscv_xlen_of_hart(const struct target *target, int hartid)
2898 assert(r->xlen[hartid] != -1);
2899 return r->xlen[hartid];
2902 bool riscv_rtos_enabled(const struct target *target)
/* Make `hartid` the hart that subsequent operations act on, by recording
 * it in riscv_info_t and invoking the DTM-specific select_current_hart
 * hook.  Invalidates the register cache when switching harts under an
 * RTOS, since the cache only reflects one hart.
 * NOTE(review): interior lines (early return when no hook, error return,
 * final return, braces) are missing from this extract. */
2907 int riscv_set_current_hartid(struct target *target, int hartid)
/* Implementations without hart selection (single hart) skip all of this. */
2910 if (!r->select_current_hart)
2913 int previous_hartid = riscv_current_hartid(target);
2914 r->current_hartid = hartid;
2915 assert(riscv_hart_enabled(target, hartid));
2916 LOG_DEBUG("setting hartid to %d, was %d", hartid, previous_hartid);
2917 if (r->select_current_hart(target) != ERROR_OK)
2920 /* This might get called during init, in which case we shouldn't be
2921 * setting up the register cache. */
2922 if (target_was_examined(target) && riscv_rtos_enabled(target))
2923 riscv_invalidate_register_cache(target);
/* Drop every cached register value for this target, so the next access
 * re-reads from the hart.  Called whenever the hart may have changed
 * state behind our back (resume, step, hart switch).
 * NOTE(review): interior lines (per-reg invalidation inside the loop,
 * closing braces) are missing from this extract. */
2928 void riscv_invalidate_register_cache(struct target *target)
2932 LOG_DEBUG("[%d]", target->coreid);
2933 register_cache_invalidate(target->reg_cache);
2934 for (size_t i = 0; i < target->reg_cache->num_regs; ++i) {
2935 struct reg *reg = &target->reg_cache->reg_list[i];
/* The cache structure itself is still considered initialized. */
2939 r->registers_initialized = true;
2942 int riscv_current_hartid(const struct target *target)
2945 return r->current_hartid;
2948 void riscv_set_all_rtos_harts(struct target *target)
2951 r->rtos_hartid = -1;
2954 void riscv_set_rtos_hartid(struct target *target, int hartid)
2956 LOG_DEBUG("setting RTOS hartid %d", hartid);
2958 r->rtos_hartid = hartid;
2961 int riscv_count_harts(struct target *target)
2966 if (r == NULL || r->hart_count == NULL)
2968 return r->hart_count(target);
2971 bool riscv_has_register(struct target *target, int hartid, int regid)
2978 * return true iff we are guaranteed that the register will contain exactly
2979 * the value we just wrote when it's read.
2980 * If write is false:
2981 * return true iff we are guaranteed that the register will read the same
2982 * value in the future as the value we just read.
2984 static bool gdb_regno_cacheable(enum gdb_regno regno, bool write)
2986 /* GPRs, FPRs, vector registers are just normal data stores. */
2987 if (regno <= GDB_REGNO_XPR31 ||
2988 (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31) ||
2989 (regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31))
2992 /* Most CSRs won't change value on us, but we can't assume it about arbitrary
/* CSRs known to be stable: safe to cache on both read and write paths
 * (subject to the WARL caveat below). */
2998 case GDB_REGNO_VSTART:
2999 case GDB_REGNO_VXSAT:
3000 case GDB_REGNO_VXRM:
3001 case GDB_REGNO_VLENB:
3003 case GDB_REGNO_VTYPE:
3004 case GDB_REGNO_MISA:
3005 case GDB_REGNO_DCSR:
3006 case GDB_REGNO_DSCRATCH0:
3007 case GDB_REGNO_MSTATUS:
3008 case GDB_REGNO_MEPC:
3009 case GDB_REGNO_MCAUSE:
3010 case GDB_REGNO_SATP:
3012 * WARL registers might not contain the value we just wrote, but
3013 * these ones won't spontaneously change their value either. */
/* Trigger CSRs: reads are coupled to tselect, so never cache them. */
3017 case GDB_REGNO_TSELECT: /* I think this should be above, but then it doesn't work. */
3018 case GDB_REGNO_TDATA1: /* Changes value when tselect is changed. */
3019 case GDB_REGNO_TDATA2: /* Changes value when tselect is changed. */
3026 * This function is called when the debug user wants to change the value of a
3027 * register. The new value may be cached, and may not be written until the hart
3029 int riscv_set_register(struct target *target, enum gdb_regno r, riscv_reg_t v)
3031 return riscv_set_register_on_hart(target, riscv_current_hartid(target), r, v);
/* Write `value` to register `regid` on hart `hartid`, updating the cached
 * copy in the register cache and marking it valid only when the register
 * is known to be cacheable after a write.
 * NOTE(review): interior lines (early-return body of the 'E' hack, final
 * return of `result`, braces) are missing from this extract. */
3034 int riscv_set_register_on_hart(struct target *target, int hartid,
3035 enum gdb_regno regid, uint64_t value)
3038 LOG_DEBUG("{%d} %s <- %" PRIx64, hartid, gdb_regno_name(regid), value);
3039 assert(r->set_register);
3041 /* TODO: Hack to deal with gdb that thinks these registers still exist. */
/* On RV32E, x16-x31 don't exist; silently accept writes of 0 to them. */
3042 if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 && value == 0 &&
3043 riscv_supports_extension(target, hartid, 'E'))
/* Keep the cached copy in sync with what we're about to write. */
3046 struct reg *reg = &target->reg_cache->reg_list[regid];
3047 buf_set_u64(reg->value, 0, reg->size, value);
3049 int result = r->set_register(target, hartid, regid, value);
3050 if (result == ERROR_OK)
3051 reg->valid = gdb_regno_cacheable(regid, true);
3054 LOG_DEBUG("[%s]{%d} wrote 0x%" PRIx64 " to %s valid=%d",
3055 target_name(target), hartid, value, reg->name, reg->valid);
3059 int riscv_get_register(struct target *target, riscv_reg_t *value,
3062 return riscv_get_register_on_hart(target, value,
3063 riscv_current_hartid(target), r);
/* Read register `regid` from hart `hartid` into `*value`, serving the
 * read from the register cache when possible (only valid for the current
 * hart) and marking the cache entry valid when the register is cacheable.
 * NOTE(review): interior lines (nonexistent-register return, cached-path
 * return, 'E' hack body, final return of `result`) are missing from this
 * extract. */
3066 int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
3067 int hartid, enum gdb_regno regid)
3071 struct reg *reg = &target->reg_cache->reg_list[regid];
3073 LOG_DEBUG("[%s]{%d} %s does not exist.",
3074 target_name(target), hartid, gdb_regno_name(regid));
/* Cache hit: only trust it for the currently selected hart. */
3078 if (reg && reg->valid && hartid == riscv_current_hartid(target)) {
3079 *value = buf_get_u64(reg->value, 0, reg->size);
3080 LOG_DEBUG("{%d} %s: %" PRIx64 " (cached)", hartid,
3081 gdb_regno_name(regid), *value);
3085 /* TODO: Hack to deal with gdb that thinks these registers still exist. */
/* On RV32E, x16-x31 don't exist; report them as 0. */
3086 if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 &&
3087 riscv_supports_extension(target, hartid, 'E')) {
3092 int result = r->get_register(target, value, hartid, regid);
3094 if (result == ERROR_OK)
3095 reg->valid = gdb_regno_cacheable(regid, false);
3097 LOG_DEBUG("{%d} %s: %" PRIx64, hartid, gdb_regno_name(regid), *value);
3101 bool riscv_is_halted(struct target *target)
3104 assert(r->is_halted);
3105 return r->is_halted(target);
3108 enum riscv_halt_reason riscv_halt_reason(struct target *target, int hartid)
3111 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
3112 return RISCV_HALT_ERROR;
3113 if (!riscv_is_halted(target)) {
3114 LOG_ERROR("Hart is not halted!");
3115 return RISCV_HALT_UNKNOWN;
3117 return r->halt_reason(target);
3120 size_t riscv_debug_buffer_size(struct target *target)
3123 return r->debug_buffer_size[riscv_current_hartid(target)];
3126 int riscv_write_debug_buffer(struct target *target, int index, riscv_insn_t insn)
3129 r->write_debug_buffer(target, index, insn);
3133 riscv_insn_t riscv_read_debug_buffer(struct target *target, int index)
3136 return r->read_debug_buffer(target, index);
3139 int riscv_execute_debug_buffer(struct target *target)
3142 return r->execute_debug_buffer(target);
3145 void riscv_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
3148 r->fill_dmi_write_u64(target, buf, a, d);
3151 void riscv_fill_dmi_read_u64(struct target *target, char *buf, int a)
3154 r->fill_dmi_read_u64(target, buf, a);
3157 void riscv_fill_dmi_nop_u64(struct target *target, char *buf)
3160 r->fill_dmi_nop_u64(target, buf);
3163 int riscv_dmi_write_u64_bits(struct target *target)
3166 return r->dmi_write_u64_bits(target);
3169 bool riscv_hart_enabled(struct target *target, int hartid)
3171 /* FIXME: Add a hart mask to the RTOS. */
3172 if (riscv_rtos_enabled(target))
3173 return hartid < riscv_count_harts(target);
3175 return hartid == target->coreid;
3179 * Count triggers, and initialize trigger_count for each hart.
3180 * trigger_count is initialized even if this function fails to discover
3182 * Disable any hardware triggers that have dmode set. We can't have set them
3183 * ourselves. Maybe they're left over from some killed debug session.
/* NOTE(review): interior lines (early return when already enumerated,
 * error returns, type switch bodies, loop braces) are missing from this
 * extract. */
3185 int riscv_enumerate_triggers(struct target *target)
/* Only enumerate once per session. */
3189 if (r->triggers_enumerated)
3192 r->triggers_enumerated = true; /* At the very least we tried. */
3194 for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
3195 if (!riscv_hart_enabled(target, hartid))
/* Save tselect so we can restore it once probing is done. */
3198 riscv_reg_t tselect;
3199 int result = riscv_get_register_on_hart(target, &tselect, hartid,
3201 if (result != ERROR_OK)
/* Probe trigger slots 0..RISCV_MAX_TRIGGERS-1 until one is missing. */
3204 for (unsigned t = 0; t < RISCV_MAX_TRIGGERS; ++t) {
3205 r->trigger_count[hartid] = t;
3207 /* If we can't write tselect, then this hart does not support triggers. */
3208 if (riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, t) != ERROR_OK)
/* A slot exists only if tselect reads back the value we wrote. */
3210 uint64_t tselect_rb;
3211 result = riscv_get_register_on_hart(target, &tselect_rb, hartid,
3213 if (result != ERROR_OK)
3215 /* Mask off the top bit, which is used as tdrmode in old
3216 * implementations. */
3217 tselect_rb &= ~(1ULL << (riscv_xlen(target)-1));
3218 if (tselect_rb != t)
3221 result = riscv_get_register_on_hart(target, &tdata1, hartid,
3223 if (result != ERROR_OK)
3226 int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
3231 /* On these older cores we don't support software using
/* Clearing tdata1 disables the trigger. */
3233 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
/* dmode triggers can't be ours; clear leftovers from a dead session. */
3236 if (tdata1 & MCONTROL_DMODE(riscv_xlen(target)))
3237 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
/* Restore the tselect value saved above. */
3242 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
3244 LOG_INFO("[%d] Found %d triggers", hartid, r->trigger_count[hartid]);
3250 const char *gdb_regno_name(enum gdb_regno regno)
3252 static char buf[32];
3255 case GDB_REGNO_ZERO:
3321 case GDB_REGNO_FPR0:
3323 case GDB_REGNO_FPR31:
3325 case GDB_REGNO_CSR0:
3327 case GDB_REGNO_TSELECT:
3329 case GDB_REGNO_TDATA1:
3331 case GDB_REGNO_TDATA2:
3333 case GDB_REGNO_MISA:
3337 case GDB_REGNO_DCSR:
3339 case GDB_REGNO_DSCRATCH0:
3341 case GDB_REGNO_MSTATUS:
3343 case GDB_REGNO_MEPC:
3345 case GDB_REGNO_MCAUSE:
3347 case GDB_REGNO_PRIV:
3349 case GDB_REGNO_SATP:
3351 case GDB_REGNO_VTYPE:
3420 if (regno <= GDB_REGNO_XPR31)
3421 sprintf(buf, "x%d", regno - GDB_REGNO_ZERO);
3422 else if (regno >= GDB_REGNO_CSR0 && regno <= GDB_REGNO_CSR4095)
3423 sprintf(buf, "csr%d", regno - GDB_REGNO_CSR0);
3424 else if (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31)
3425 sprintf(buf, "f%d", regno - GDB_REGNO_FPR0);
3427 sprintf(buf, "gdb_regno_%d", regno);
/* struct reg_arch_type .get callback: read a register from the target
 * into reg->value.  Vector registers go through the buffer-based hook
 * (they're wider than 64 bits); everything else through
 * riscv_get_register().
 * NOTE(review): interior lines (error returns, vector-path return,
 * free(str), final return, braces) are missing from this extract. */
3432 static int register_get(struct reg *reg)
3434 riscv_reg_info_t *reg_info = reg->arch_info;
3435 struct target *target = reg_info->target;
/* Vector registers don't fit in a riscv_reg_t; use the buffer hook. */
3438 if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
3439 if (!r->get_register_buf) {
3440 LOG_ERROR("Reading register %s not supported on this RISC-V target.",
3441 gdb_regno_name(reg->number));
3445 if (r->get_register_buf(target, reg->value, reg->number) != ERROR_OK)
/* Scalar path: fetch into a u64 and store it in the cache buffer. */
3449 int result = riscv_get_register(target, &value, reg->number);
3450 if (result != ERROR_OK)
3452 buf_set_u64(reg->value, 0, reg->size, value);
3454 reg->valid = gdb_regno_cacheable(reg->number, false);
3455 char *str = buf_to_hex_str(reg->value, reg->size);
3456 LOG_DEBUG("[%d]{%d} read 0x%s from %s (valid=%d)", target->coreid,
3457 riscv_current_hartid(target), str, reg->name, reg->valid);
/* struct reg_arch_type .set callback: write the caller-supplied buffer
 * `buf` to the register, updating the cached copy.  Writes to the trigger
 * CSRs first force trigger enumeration (which clears stale dmode
 * triggers) and mark that the user is managing hardware breakpoints
 * manually.
 * NOTE(review): interior lines (free(str), error returns, final return,
 * braces) are missing from this extract. */
3462 static int register_set(struct reg *reg, uint8_t *buf)
3464 riscv_reg_info_t *reg_info = reg->arch_info;
3465 struct target *target = reg_info->target;
3468 char *str = buf_to_hex_str(buf, reg->size);
3469 LOG_DEBUG("[%d]{%d} write 0x%s to %s (valid=%d)", target->coreid,
3470 riscv_current_hartid(target), str, reg->name, reg->valid);
/* Keep the cache in sync with what we're writing. */
3473 memcpy(reg->value, buf, DIV_ROUND_UP(reg->size, 8));
3474 reg->valid = gdb_regno_cacheable(reg->number, true);
3476 if (reg->number == GDB_REGNO_TDATA1 ||
3477 reg->number == GDB_REGNO_TDATA2) {
3478 r->manual_hwbp_set = true;
3479 /* When enumerating triggers, we clear any triggers with DMODE set,
3480 * assuming they were left over from a previous debug session. So make
3481 * sure that is done before a user might be setting their own triggers.
3483 if (riscv_enumerate_triggers(target) != ERROR_OK)
/* Vector registers don't fit in a u64; use the buffer hook. */
3487 if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
3488 if (!r->set_register_buf) {
3489 LOG_ERROR("Writing register %s not supported on this RISC-V target.",
3490 gdb_regno_name(reg->number));
3494 if (r->set_register_buf(target, reg->number, reg->value) != ERROR_OK)
/* Scalar path. */
3497 uint64_t value = buf_get_u64(buf, 0, reg->size);
3498 if (riscv_set_register(target, reg->number, value) != ERROR_OK)
/* Accessor vtable shared by every entry in the RISC-V register cache.
 * NOTE(review): the .set = register_set line is missing from this
 * extract. */
3505 static struct reg_arch_type riscv_reg_arch_type = {
3506 .get = register_get,
3515 static int cmp_csr_info(const void *p1, const void *p2)
3517 return (int) (((struct csr_info *)p1)->number) - (int) (((struct csr_info *)p2)->number);
3520 int riscv_init_registers(struct target *target)
3524 riscv_free_registers(target);
3526 target->reg_cache = calloc(1, sizeof(*target->reg_cache));
3527 if (!target->reg_cache)
3529 target->reg_cache->name = "RISC-V Registers";
3530 target->reg_cache->num_regs = GDB_REGNO_COUNT;
3532 if (expose_custom) {
3533 for (unsigned i = 0; expose_custom[i].low <= expose_custom[i].high; i++) {
3534 for (unsigned number = expose_custom[i].low;
3535 number <= expose_custom[i].high;
3537 target->reg_cache->num_regs++;
3541 LOG_DEBUG("create register cache for %d registers",
3542 target->reg_cache->num_regs);
3544 target->reg_cache->reg_list =
3545 calloc(target->reg_cache->num_regs, sizeof(struct reg));
3546 if (!target->reg_cache->reg_list)
3549 const unsigned int max_reg_name_len = 12;
3550 free(info->reg_names);
3552 calloc(target->reg_cache->num_regs, max_reg_name_len);
3553 if (!info->reg_names)
3555 char *reg_name = info->reg_names;
3557 int hartid = riscv_current_hartid(target);
3559 static struct reg_feature feature_cpu = {
3560 .name = "org.gnu.gdb.riscv.cpu"
3562 static struct reg_feature feature_fpu = {
3563 .name = "org.gnu.gdb.riscv.fpu"
3565 static struct reg_feature feature_csr = {
3566 .name = "org.gnu.gdb.riscv.csr"
3568 static struct reg_feature feature_vector = {
3569 .name = "org.gnu.gdb.riscv.vector"
3571 static struct reg_feature feature_virtual = {
3572 .name = "org.gnu.gdb.riscv.virtual"
3574 static struct reg_feature feature_custom = {
3575 .name = "org.gnu.gdb.riscv.custom"
3578 /* These types are built into gdb. */
3579 static struct reg_data_type type_ieee_single = { .type = REG_TYPE_IEEE_SINGLE, .id = "ieee_single" };
3580 static struct reg_data_type type_ieee_double = { .type = REG_TYPE_IEEE_DOUBLE, .id = "ieee_double" };
3581 static struct reg_data_type_union_field single_double_fields[] = {
3582 {"float", &type_ieee_single, single_double_fields + 1},
3583 {"double", &type_ieee_double, NULL},
3585 static struct reg_data_type_union single_double_union = {
3586 .fields = single_double_fields
3588 static struct reg_data_type type_ieee_single_double = {
3589 .type = REG_TYPE_ARCH_DEFINED,
3591 .type_class = REG_TYPE_CLASS_UNION,
3592 .reg_type_union = &single_double_union
3594 static struct reg_data_type type_uint8 = { .type = REG_TYPE_UINT8, .id = "uint8" };
3595 static struct reg_data_type type_uint16 = { .type = REG_TYPE_UINT16, .id = "uint16" };
3596 static struct reg_data_type type_uint32 = { .type = REG_TYPE_UINT32, .id = "uint32" };
3597 static struct reg_data_type type_uint64 = { .type = REG_TYPE_UINT64, .id = "uint64" };
3598 static struct reg_data_type type_uint128 = { .type = REG_TYPE_UINT128, .id = "uint128" };
3600 /* This is roughly the XML we want:
3601 * <vector id="bytes" type="uint8" count="16"/>
3602 * <vector id="shorts" type="uint16" count="8"/>
3603 * <vector id="words" type="uint32" count="4"/>
3604 * <vector id="longs" type="uint64" count="2"/>
3605 * <vector id="quads" type="uint128" count="1"/>
3606 * <union id="riscv_vector_type">
3607 * <field name="b" type="bytes"/>
3608 * <field name="s" type="shorts"/>
3609 * <field name="w" type="words"/>
3610 * <field name="l" type="longs"/>
3611 * <field name="q" type="quads"/>
3615 info->vector_uint8.type = &type_uint8;
3616 info->vector_uint8.count = info->vlenb[hartid];
3617 info->type_uint8_vector.type = REG_TYPE_ARCH_DEFINED;
3618 info->type_uint8_vector.id = "bytes";
3619 info->type_uint8_vector.type_class = REG_TYPE_CLASS_VECTOR;
3620 info->type_uint8_vector.reg_type_vector = &info->vector_uint8;
3622 info->vector_uint16.type = &type_uint16;
3623 info->vector_uint16.count = info->vlenb[hartid] / 2;
3624 info->type_uint16_vector.type = REG_TYPE_ARCH_DEFINED;
3625 info->type_uint16_vector.id = "shorts";
3626 info->type_uint16_vector.type_class = REG_TYPE_CLASS_VECTOR;
3627 info->type_uint16_vector.reg_type_vector = &info->vector_uint16;
3629 info->vector_uint32.type = &type_uint32;
3630 info->vector_uint32.count = info->vlenb[hartid] / 4;
3631 info->type_uint32_vector.type = REG_TYPE_ARCH_DEFINED;
3632 info->type_uint32_vector.id = "words";
3633 info->type_uint32_vector.type_class = REG_TYPE_CLASS_VECTOR;
3634 info->type_uint32_vector.reg_type_vector = &info->vector_uint32;
3636 info->vector_uint64.type = &type_uint64;
3637 info->vector_uint64.count = info->vlenb[hartid] / 8;
3638 info->type_uint64_vector.type = REG_TYPE_ARCH_DEFINED;
3639 info->type_uint64_vector.id = "longs";
3640 info->type_uint64_vector.type_class = REG_TYPE_CLASS_VECTOR;
3641 info->type_uint64_vector.reg_type_vector = &info->vector_uint64;
3643 info->vector_uint128.type = &type_uint128;
3644 info->vector_uint128.count = info->vlenb[hartid] / 16;
3645 info->type_uint128_vector.type = REG_TYPE_ARCH_DEFINED;
3646 info->type_uint128_vector.id = "quads";
3647 info->type_uint128_vector.type_class = REG_TYPE_CLASS_VECTOR;
3648 info->type_uint128_vector.reg_type_vector = &info->vector_uint128;
3650 info->vector_fields[0].name = "b";
3651 info->vector_fields[0].type = &info->type_uint8_vector;
3652 if (info->vlenb[hartid] >= 2) {
3653 info->vector_fields[0].next = info->vector_fields + 1;
3654 info->vector_fields[1].name = "s";
3655 info->vector_fields[1].type = &info->type_uint16_vector;
3657 info->vector_fields[0].next = NULL;
3659 if (info->vlenb[hartid] >= 4) {
3660 info->vector_fields[1].next = info->vector_fields + 2;
3661 info->vector_fields[2].name = "w";
3662 info->vector_fields[2].type = &info->type_uint32_vector;
3664 info->vector_fields[1].next = NULL;
3666 if (info->vlenb[hartid] >= 8) {
3667 info->vector_fields[2].next = info->vector_fields + 3;
3668 info->vector_fields[3].name = "l";
3669 info->vector_fields[3].type = &info->type_uint64_vector;
3671 info->vector_fields[2].next = NULL;
3673 if (info->vlenb[hartid] >= 16) {
3674 info->vector_fields[3].next = info->vector_fields + 4;
3675 info->vector_fields[4].name = "q";
3676 info->vector_fields[4].type = &info->type_uint128_vector;
3678 info->vector_fields[3].next = NULL;
3680 info->vector_fields[4].next = NULL;
3682 info->vector_union.fields = info->vector_fields;
3684 info->type_vector.type = REG_TYPE_ARCH_DEFINED;
3685 info->type_vector.id = "riscv_vector";
3686 info->type_vector.type_class = REG_TYPE_CLASS_UNION;
3687 info->type_vector.reg_type_union = &info->vector_union;
3689 struct csr_info csr_info[] = {
3690 #define DECLARE_CSR(name, number) { number, #name },
3691 #include "encoding.h"
3694 /* encoding.h does not contain the registers in sorted order. */
3695 qsort(csr_info, DIM(csr_info), sizeof(*csr_info), cmp_csr_info);
3696 unsigned csr_info_index = 0;
3698 unsigned custom_range_index = 0;
3699 int custom_within_range = 0;
3701 riscv_reg_info_t *shared_reg_info = calloc(1, sizeof(riscv_reg_info_t));
3702 if (!shared_reg_info)
3704 shared_reg_info->target = target;
3706 /* When gdb requests register N, gdb_get_register_packet() assumes that this
3707 * is register at index N in reg_list. So if there are certain registers
3708 * that don't exist, we need to leave holes in the list (or renumber, but
3709 * it would be nice not to have yet another set of numbers to translate
3711 for (uint32_t number = 0; number < target->reg_cache->num_regs; number++) {
3712 struct reg *r = &target->reg_cache->reg_list[number];
3716 r->type = &riscv_reg_arch_type;
3717 r->arch_info = shared_reg_info;
3719 r->size = riscv_xlen(target);
3720 /* r->size is set in riscv_invalidate_register_cache, maybe because the
3721 * target is in theory allowed to change XLEN on us. But I expect a lot
3722 * of other things to break in that case as well. */
3723 if (number <= GDB_REGNO_XPR31) {
3724 r->exist = number <= GDB_REGNO_XPR15 ||
3725 !riscv_supports_extension(target, hartid, 'E');
3726 /* TODO: For now we fake that all GPRs exist because otherwise gdb
3729 r->caller_save = true;
3731 case GDB_REGNO_ZERO:
3828 r->group = "general";
3829 r->feature = &feature_cpu;
3830 } else if (number == GDB_REGNO_PC) {
3831 r->caller_save = true;
3832 sprintf(reg_name, "pc");
3833 r->group = "general";
3834 r->feature = &feature_cpu;
3835 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
3836 r->caller_save = true;
3837 if (riscv_supports_extension(target, hartid, 'D')) {
3839 if (riscv_supports_extension(target, hartid, 'F'))
3840 r->reg_data_type = &type_ieee_single_double;
3842 r->reg_data_type = &type_ieee_double;
3843 } else if (riscv_supports_extension(target, hartid, 'F')) {
3844 r->reg_data_type = &type_ieee_single;
3928 case GDB_REGNO_FS10:
3931 case GDB_REGNO_FS11:
3940 case GDB_REGNO_FT10:
3943 case GDB_REGNO_FT11:
3948 r->feature = &feature_fpu;
3949 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
3951 r->feature = &feature_csr;
3952 unsigned csr_number = number - GDB_REGNO_CSR0;
3954 while (csr_info[csr_info_index].number < csr_number &&
3955 csr_info_index < DIM(csr_info) - 1) {
3958 if (csr_info[csr_info_index].number == csr_number) {
3959 r->name = csr_info[csr_info_index].name;
3961 sprintf(reg_name, "csr%d", csr_number);
3962 /* Assume unnamed registers don't exist, unless we have some
3963 * configuration that tells us otherwise. That's important
3964 * because eg. Eclipse crashes if a target has too many
3965 * registers, and apparently has no way of only showing a
3966 * subset of registers in any case. */
3970 switch (csr_number) {
3974 r->exist = riscv_supports_extension(target, hartid, 'F');
3976 r->feature = &feature_fpu;
3982 case CSR_SCOUNTEREN:
3988 r->exist = riscv_supports_extension(target, hartid, 'S');
3992 /* "In systems with only M-mode, or with both M-mode and
3993 * U-mode but without U-mode trap support, the medeleg and
3994 * mideleg registers should not exist." */
3995 r->exist = riscv_supports_extension(target, hartid, 'S') ||
3996 riscv_supports_extension(target, hartid, 'N');
4004 case CSR_HPMCOUNTER3H:
4005 case CSR_HPMCOUNTER4H:
4006 case CSR_HPMCOUNTER5H:
4007 case CSR_HPMCOUNTER6H:
4008 case CSR_HPMCOUNTER7H:
4009 case CSR_HPMCOUNTER8H:
4010 case CSR_HPMCOUNTER9H:
4011 case CSR_HPMCOUNTER10H:
4012 case CSR_HPMCOUNTER11H:
4013 case CSR_HPMCOUNTER12H:
4014 case CSR_HPMCOUNTER13H:
4015 case CSR_HPMCOUNTER14H:
4016 case CSR_HPMCOUNTER15H:
4017 case CSR_HPMCOUNTER16H:
4018 case CSR_HPMCOUNTER17H:
4019 case CSR_HPMCOUNTER18H:
4020 case CSR_HPMCOUNTER19H:
4021 case CSR_HPMCOUNTER20H:
4022 case CSR_HPMCOUNTER21H:
4023 case CSR_HPMCOUNTER22H:
4024 case CSR_HPMCOUNTER23H:
4025 case CSR_HPMCOUNTER24H:
4026 case CSR_HPMCOUNTER25H:
4027 case CSR_HPMCOUNTER26H:
4028 case CSR_HPMCOUNTER27H:
4029 case CSR_HPMCOUNTER28H:
4030 case CSR_HPMCOUNTER29H:
4031 case CSR_HPMCOUNTER30H:
4032 case CSR_HPMCOUNTER31H:
4035 case CSR_MHPMCOUNTER3H:
4036 case CSR_MHPMCOUNTER4H:
4037 case CSR_MHPMCOUNTER5H:
4038 case CSR_MHPMCOUNTER6H:
4039 case CSR_MHPMCOUNTER7H:
4040 case CSR_MHPMCOUNTER8H:
4041 case CSR_MHPMCOUNTER9H:
4042 case CSR_MHPMCOUNTER10H:
4043 case CSR_MHPMCOUNTER11H:
4044 case CSR_MHPMCOUNTER12H:
4045 case CSR_MHPMCOUNTER13H:
4046 case CSR_MHPMCOUNTER14H:
4047 case CSR_MHPMCOUNTER15H:
4048 case CSR_MHPMCOUNTER16H:
4049 case CSR_MHPMCOUNTER17H:
4050 case CSR_MHPMCOUNTER18H:
4051 case CSR_MHPMCOUNTER19H:
4052 case CSR_MHPMCOUNTER20H:
4053 case CSR_MHPMCOUNTER21H:
4054 case CSR_MHPMCOUNTER22H:
4055 case CSR_MHPMCOUNTER23H:
4056 case CSR_MHPMCOUNTER24H:
4057 case CSR_MHPMCOUNTER25H:
4058 case CSR_MHPMCOUNTER26H:
4059 case CSR_MHPMCOUNTER27H:
4060 case CSR_MHPMCOUNTER28H:
4061 case CSR_MHPMCOUNTER29H:
4062 case CSR_MHPMCOUNTER30H:
4063 case CSR_MHPMCOUNTER31H:
4064 r->exist = riscv_xlen(target) == 32;
4073 r->exist = riscv_supports_extension(target, hartid, 'V');
4077 if (!r->exist && expose_csr) {
4078 for (unsigned i = 0; expose_csr[i].low <= expose_csr[i].high; i++) {
4079 if (csr_number >= expose_csr[i].low && csr_number <= expose_csr[i].high) {
4080 LOG_INFO("Exposing additional CSR %d", csr_number);
4087 } else if (number == GDB_REGNO_PRIV) {
4088 sprintf(reg_name, "priv");
4089 r->group = "general";
4090 r->feature = &feature_virtual;
4093 } else if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31) {
4094 r->caller_save = false;
4095 r->exist = riscv_supports_extension(target, hartid, 'V') && info->vlenb[hartid];
4096 r->size = info->vlenb[hartid] * 8;
4097 sprintf(reg_name, "v%d", number - GDB_REGNO_V0);
4098 r->group = "vector";
4099 r->feature = &feature_vector;
4100 r->reg_data_type = &info->type_vector;
4102 } else if (number >= GDB_REGNO_COUNT) {
4103 /* Custom registers. */
4104 assert(expose_custom);
4106 range_t *range = &expose_custom[custom_range_index];
4107 assert(range->low <= range->high);
4108 unsigned custom_number = range->low + custom_within_range;
4110 r->group = "custom";
4111 r->feature = &feature_custom;
4112 r->arch_info = calloc(1, sizeof(riscv_reg_info_t));
4115 ((riscv_reg_info_t *) r->arch_info)->target = target;
4116 ((riscv_reg_info_t *) r->arch_info)->custom_number = custom_number;
4117 sprintf(reg_name, "custom%d", custom_number);
4119 custom_within_range++;
4120 if (custom_within_range > range->high - range->low) {
4121 custom_within_range = 0;
4122 custom_range_index++;
4128 reg_name += strlen(reg_name) + 1;
4129 assert(reg_name < info->reg_names + target->reg_cache->num_regs *
4131 r->value = info->reg_cache_values[number];
4138 void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
4139 riscv_bscan_tunneled_scan_context_t *ctxt)
4141 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
4143 memset(ctxt->tunneled_dr, 0, sizeof(ctxt->tunneled_dr));
4144 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
4145 ctxt->tunneled_dr[3].num_bits = 1;
4146 ctxt->tunneled_dr[3].out_value = bscan_one;
4147 ctxt->tunneled_dr[2].num_bits = 7;
4148 ctxt->tunneled_dr_width = field->num_bits;
4149 ctxt->tunneled_dr[2].out_value = &ctxt->tunneled_dr_width;
4150 /* for BSCAN tunnel, there is a one-TCK skew between shift in and shift out, so
4151 scanning num_bits + 1, and then will right shift the input field after executing the queues */
4153 ctxt->tunneled_dr[1].num_bits = field->num_bits + 1;
4154 ctxt->tunneled_dr[1].out_value = field->out_value;
4155 ctxt->tunneled_dr[1].in_value = field->in_value;
4157 ctxt->tunneled_dr[0].num_bits = 3;
4158 ctxt->tunneled_dr[0].out_value = bscan_zero;
4160 /* BSCAN_TUNNEL_NESTED_TAP */
4161 ctxt->tunneled_dr[0].num_bits = 1;
4162 ctxt->tunneled_dr[0].out_value = bscan_one;
4163 ctxt->tunneled_dr[1].num_bits = 7;
4164 ctxt->tunneled_dr_width = field->num_bits;
4165 ctxt->tunneled_dr[1].out_value = &ctxt->tunneled_dr_width;
4166 /* for BSCAN tunnel, there is a one-TCK skew between shift in and shift out, so
4167 scanning num_bits + 1, and then will right shift the input field after executing the queues */
4168 ctxt->tunneled_dr[2].num_bits = field->num_bits + 1;
4169 ctxt->tunneled_dr[2].out_value = field->out_value;
4170 ctxt->tunneled_dr[2].in_value = field->in_value;
4171 ctxt->tunneled_dr[3].num_bits = 3;
4172 ctxt->tunneled_dr[3].out_value = bscan_zero;
4174 jtag_add_dr_scan(target->tap, ARRAY_SIZE(ctxt->tunneled_dr), ctxt->tunneled_dr, TAP_IDLE);