1 /* SPDX-License-Identifier: GPL-2.0-or-later */
11 #include "target/target.h"
12 #include "target/algorithm.h"
13 #include "target/target_type.h"
15 #include "jtag/jtag.h"
16 #include "target/register.h"
17 #include "target/breakpoints.h"
18 #include "helper/time_support.h"
21 #include "rtos/rtos.h"
/* Field helpers: ((mask) & ~((mask) << 1)) isolates the lowest set bit of a
 * contiguous mask, so dividing/multiplying by it shifts the field value
 * down/up by the field offset without needing a separate shift constant. */
23 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
24 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
26 /* Constants for legacy SiFive hardware breakpoints. */
27 #define CSR_BPCONTROL_X (1<<0)
28 #define CSR_BPCONTROL_W (1<<1)
29 #define CSR_BPCONTROL_R (1<<2)
30 #define CSR_BPCONTROL_U (1<<3)
31 #define CSR_BPCONTROL_S (1<<4)
32 #define CSR_BPCONTROL_H (1<<5)
33 #define CSR_BPCONTROL_M (1<<6)
34 #define CSR_BPCONTROL_BPMATCH (0xf<<7)
35 #define CSR_BPCONTROL_BPACTION (0xff<<11)
/* Fixed addresses inside the debug module.  NOTE(review): these match the
 * legacy 0.11 debug spec's ROM/RAM layout - confirm against that spec. */
37 #define DEBUG_ROM_START 0x800
38 #define DEBUG_ROM_RESUME (DEBUG_ROM_START + 4)
39 #define DEBUG_ROM_EXCEPTION (DEBUG_ROM_START + 8)
40 #define DEBUG_RAM_START 0x400
42 #define SETHALTNOT 0x10c
44 /*** JTAG registers. ***/
/* dtmcontrol/dtmcs: DTM identification and control fields. */
46 #define DTMCONTROL 0x10
47 #define DTMCONTROL_DBUS_RESET (1<<16)
48 #define DTMCONTROL_IDLE (7<<10)
49 #define DTMCONTROL_ADDRBITS (0xf<<4)
50 #define DTMCONTROL_VERSION (0xf)
/* Layout of the dbus JTAG register: 2-bit op, 34-bit data, then address. */
53 #define DBUS_OP_START 0
54 #define DBUS_OP_SIZE 2
/* dbus operation status codes (the enclosing enum is outside this excerpt). */
61 DBUS_STATUS_SUCCESS = 0,
62 DBUS_STATUS_FAILED = 2,
65 #define DBUS_DATA_START 2
66 #define DBUS_DATA_SIZE 34
67 #define DBUS_ADDRESS_START 36
75 /*** Debug Bus registers. ***/
77 #define DMCONTROL 0x10
78 #define DMCONTROL_INTERRUPT (((uint64_t)1)<<33)
79 #define DMCONTROL_HALTNOT (((uint64_t)1)<<32)
80 #define DMCONTROL_BUSERROR (7<<19)
81 #define DMCONTROL_SERIAL (3<<16)
82 #define DMCONTROL_AUTOINCREMENT (1<<15)
83 #define DMCONTROL_ACCESS (7<<12)
84 #define DMCONTROL_HARTID (0x3ff<<2)
85 #define DMCONTROL_NDRESET (1<<1)
86 #define DMCONTROL_FULLRESET 1
/* dminfo: capability bits describing the debug module implementation. */
89 #define DMINFO_ABUSSIZE (0x7fU<<25)
90 #define DMINFO_SERIALCOUNT (0xf<<21)
91 #define DMINFO_ACCESS128 (1<<20)
92 #define DMINFO_ACCESS64 (1<<19)
93 #define DMINFO_ACCESS32 (1<<18)
94 #define DMINFO_ACCESS16 (1<<17)
95 #define DMINFO_ACCESS8 (1<<16)
96 #define DMINFO_DRAMSIZE (0x3f<<10)
97 #define DMINFO_AUTHENTICATED (1<<5)
98 #define DMINFO_AUTHBUSY (1<<4)
99 #define DMINFO_AUTHTYPE (3<<2)
100 #define DMINFO_VERSION 3
102 /*** Info about the core being debugged. ***/
104 #define DBUS_ADDRESS_UNKNOWN 0xffff
107 #define DRAM_CACHE_SIZE 16
/* Pre-built JTAG IR payloads and scan_field descriptors for the DTM
 * registers.  The num_bits members are sized in riscv_init_target() once the
 * TAP's IR length is known. */
109 uint8_t ir_dtmcontrol[4] = {DTMCONTROL};
110 struct scan_field select_dtmcontrol = {
112 .out_value = ir_dtmcontrol
114 uint8_t ir_dbus[4] = {DBUS};
115 struct scan_field select_dbus = {
119 uint8_t ir_idcode[4] = {0x1};
120 struct scan_field select_idcode = {
122 .out_value = ir_idcode
/* BSCAN tunnel configuration: DMI access tunneled through a boundary-scan
 * TAP (IR 0x23 below is the USER4 instruction). */
125 bscan_tunnel_type_t bscan_tunnel_type;
126 int bscan_tunnel_ir_width; /* if zero, then tunneling is not present/active */
/* Constant 0/1 payloads used for tunnel framing bits. */
128 static uint8_t bscan_zero[4] = {0};
129 static uint8_t bscan_one[4] = {1};
131 uint8_t ir_user4[4] = {0x23};
132 struct scan_field select_user4 = {
134 .out_value = ir_user4
/* DR sequences that steer the tunnel to the DMI register; widths are
 * patched in riscv_init_target(). */
138 uint8_t bscan_tunneled_ir_width[4] = {5}; /* overridden by assignment in riscv_init_target */
139 struct scan_field _bscan_tunnel_data_register_select_dmi[] = {
142 .out_value = bscan_zero,
146 .num_bits = 5, /* initialized in riscv_init_target to ir width of DM */
147 .out_value = ir_dbus,
152 .out_value = bscan_tunneled_ir_width,
157 .out_value = bscan_zero,
/* Same selection sequence, but with the fields in the reverse order required
 * by the nested-TAP tunnel flavor. */
162 struct scan_field _bscan_tunnel_nested_tap_select_dmi[] = {
165 .out_value = bscan_zero,
170 .out_value = bscan_tunneled_ir_width,
174 .num_bits = 0, /* initialized in riscv_init_target to ir width of DM */
175 .out_value = ir_dbus,
180 .out_value = bscan_zero,
184 struct scan_field *bscan_tunnel_nested_tap_select_dmi = _bscan_tunnel_nested_tap_select_dmi;
185 uint32_t bscan_tunnel_nested_tap_select_dmi_num_fields = ARRAY_SIZE(_bscan_tunnel_nested_tap_select_dmi);
187 struct scan_field *bscan_tunnel_data_register_select_dmi = _bscan_tunnel_data_register_select_dmi;
188 uint32_t bscan_tunnel_data_register_select_dmi_num_fields = ARRAY_SIZE(_bscan_tunnel_data_register_select_dmi);
/* NOTE(review): this member list appears to belong to a struct (trigger?)
 * whose opening declaration lies outside this excerpt - confirm. */
195 bool read, write, execute;
199 /* Wall-clock timeout for a command/access. Settable via RISC-V Target commands.*/
200 int riscv_command_timeout_sec = DEFAULT_COMMAND_TIMEOUT_SEC;
202 /* Wall-clock timeout after reset. Settable via RISC-V Target commands.*/
203 int riscv_reset_timeout_sec = DEFAULT_RESET_TIMEOUT_SEC;
/* Global behavior knobs, settable from configuration commands. */
205 bool riscv_prefer_sba;
206 bool riscv_enable_virt2phys = true;
/* Which privilege modes ebreak enters debug mode in (M/S/U). */
207 bool riscv_ebreakm = true;
208 bool riscv_ebreaks = true;
209 bool riscv_ebreaku = true;
211 bool riscv_enable_virtual;
217 /* In addition to the ones in the standard spec, we'll also expose additional
219  * The list is either NULL, or a series of ranges (inclusive), terminated with
222 /* Same, but for custom registers. */
223 range_t *expose_custom;
/* Page-table-walk descriptions for the standard RISC-V address translation
 * modes.  Array entries are indexed by page-table level; the element counts
 * (2, 3, 4) match the number of levels of Sv32, Sv39 and Sv48. */
230 virt2phys_info_t sv32 = {
235 .vpn_shift = {12, 22},
236 .vpn_mask = {0x3ff, 0x3ff},
237 .pte_ppn_shift = {10, 20},
238 .pte_ppn_mask = {0x3ff, 0xfff},
239 .pa_ppn_shift = {12, 22},
240 .pa_ppn_mask = {0x3ff, 0xfff},
243 virt2phys_info_t sv39 = {
248 .vpn_shift = {12, 21, 30},
249 .vpn_mask = {0x1ff, 0x1ff, 0x1ff},
250 .pte_ppn_shift = {10, 19, 28},
251 .pte_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
252 .pa_ppn_shift = {12, 21, 30},
253 .pa_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
256 virt2phys_info_t sv48 = {
261 .vpn_shift = {12, 21, 30, 39},
262 .vpn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ff},
263 .pte_ppn_shift = {10, 19, 28, 37},
264 .pte_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
265 .pa_ppn_shift = {12, 21, 30, 39},
266 .pa_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
/* Forward declaration; defined later in this file. */
269 static int riscv_resume_go_all_harts(struct target *target);
/* Queue the IR/DR scans that steer the BSCAN tunnel so subsequent scans
 * reach the tunneled DMI register.  Only queues; does not execute the
 * JTAG queue. */
271 void select_dmi_via_bscan(struct target *target)
273 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
274 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
275 jtag_add_dr_scan(target->tap, bscan_tunnel_data_register_select_dmi_num_fields,
276 bscan_tunnel_data_register_select_dmi, TAP_IDLE);
277 else /* BSCAN_TUNNEL_NESTED_TAP */
278 jtag_add_dr_scan(target->tap, bscan_tunnel_nested_tap_select_dmi_num_fields,
279 bscan_tunnel_nested_tap_select_dmi, TAP_IDLE);
282 uint32_t dtmcontrol_scan_via_bscan(struct target *target, uint32_t out)
284 /* On BSCAN TAP: Select IR=USER4, issue tunneled IR scan via BSCAN TAP's DR */
285 uint8_t tunneled_ir_width[4] = {bscan_tunnel_ir_width};
286 uint8_t tunneled_dr_width[4] = {32};
287 uint8_t out_value[5] = {0};
288 uint8_t in_value[5] = {0};
290 buf_set_u32(out_value, 0, 32, out);
291 struct scan_field tunneled_ir[4] = {};
292 struct scan_field tunneled_dr[4] = {};
294 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
295 tunneled_ir[0].num_bits = 3;
296 tunneled_ir[0].out_value = bscan_zero;
297 tunneled_ir[0].in_value = NULL;
298 tunneled_ir[1].num_bits = bscan_tunnel_ir_width;
299 tunneled_ir[1].out_value = ir_dtmcontrol;
300 tunneled_ir[1].in_value = NULL;
301 tunneled_ir[2].num_bits = 7;
302 tunneled_ir[2].out_value = tunneled_ir_width;
303 tunneled_ir[2].in_value = NULL;
304 tunneled_ir[3].num_bits = 1;
305 tunneled_ir[3].out_value = bscan_zero;
306 tunneled_ir[3].in_value = NULL;
308 tunneled_dr[0].num_bits = 3;
309 tunneled_dr[0].out_value = bscan_zero;
310 tunneled_dr[0].in_value = NULL;
311 tunneled_dr[1].num_bits = 32 + 1;
312 tunneled_dr[1].out_value = out_value;
313 tunneled_dr[1].in_value = in_value;
314 tunneled_dr[2].num_bits = 7;
315 tunneled_dr[2].out_value = tunneled_dr_width;
316 tunneled_dr[2].in_value = NULL;
317 tunneled_dr[3].num_bits = 1;
318 tunneled_dr[3].out_value = bscan_one;
319 tunneled_dr[3].in_value = NULL;
321 /* BSCAN_TUNNEL_NESTED_TAP */
322 tunneled_ir[3].num_bits = 3;
323 tunneled_ir[3].out_value = bscan_zero;
324 tunneled_ir[3].in_value = NULL;
325 tunneled_ir[2].num_bits = bscan_tunnel_ir_width;
326 tunneled_ir[2].out_value = ir_dtmcontrol;
327 tunneled_ir[1].in_value = NULL;
328 tunneled_ir[1].num_bits = 7;
329 tunneled_ir[1].out_value = tunneled_ir_width;
330 tunneled_ir[2].in_value = NULL;
331 tunneled_ir[0].num_bits = 1;
332 tunneled_ir[0].out_value = bscan_zero;
333 tunneled_ir[0].in_value = NULL;
335 tunneled_dr[3].num_bits = 3;
336 tunneled_dr[3].out_value = bscan_zero;
337 tunneled_dr[3].in_value = NULL;
338 tunneled_dr[2].num_bits = 32 + 1;
339 tunneled_dr[2].out_value = out_value;
340 tunneled_dr[2].in_value = in_value;
341 tunneled_dr[1].num_bits = 7;
342 tunneled_dr[1].out_value = tunneled_dr_width;
343 tunneled_dr[1].in_value = NULL;
344 tunneled_dr[0].num_bits = 1;
345 tunneled_dr[0].out_value = bscan_one;
346 tunneled_dr[0].in_value = NULL;
348 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
349 jtag_add_dr_scan(target->tap, ARRAY_SIZE(tunneled_ir), tunneled_ir, TAP_IDLE);
350 jtag_add_dr_scan(target->tap, ARRAY_SIZE(tunneled_dr), tunneled_dr, TAP_IDLE);
351 select_dmi_via_bscan(target);
353 int retval = jtag_execute_queue();
354 if (retval != ERROR_OK) {
355 LOG_ERROR("failed jtag scan: %d", retval);
358 /* Note the starting offset is bit 1, not bit 0. In BSCAN tunnel, there is a one-bit TCK skew between
360 uint32_t in = buf_get_u32(in_value, 1, 32);
361 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
/* Scan `out` through dtmcontrol and return the value read back.  Delegates
 * to the tunneled path when BSCAN tunneling is active; otherwise does a
 * direct IR/DR scan and re-selects dbus afterwards. */
368 static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
370 struct scan_field field;
372 uint8_t out_value[4] = { 0 };
374 if (bscan_tunnel_ir_width != 0)
375 return dtmcontrol_scan_via_bscan(target, out);
378 buf_set_u32(out_value, 0, 32, out);
380 jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
383 field.out_value = out_value;
384 field.in_value = in_value;
385 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
387 /* Always return to dbus. */
388 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
390 int retval = jtag_execute_queue();
391 if (retval != ERROR_OK) {
392 LOG_ERROR("failed jtag scan: %d", retval);
396 uint32_t in = buf_get_u32(field.in_value, 0, 32);
397 LOG_DEBUG("DTMCONTROL: 0x%x -> 0x%x", out, in);
/* Return the version-specific target driver (0.11 vs 0.13) selected by the
 * DTM version recorded during examine.  Logs an error when arch_info is
 * missing or the version is unsupported. */
402 static struct target_type *get_target_type(struct target *target)
404 riscv_info_t *info = (riscv_info_t *) target->arch_info;
407 LOG_ERROR("Target has not been initialized");
411 switch (info->dtm_version) {
413 return &riscv011_target;
415 return &riscv013_target;
417 LOG_ERROR("Unsupported DTM version: %d", info->dtm_version);
/* Allocate per-target RISC-V state, size the IR scan fields to this TAP's
 * IR length, patch the BSCAN tunnel widths when tunneling is enabled, and
 * initialize semihosting. */
422 static int riscv_init_target(struct command_context *cmd_ctx,
423 struct target *target)
425 LOG_DEBUG("riscv_init_target()");
426 target->arch_info = calloc(1, sizeof(riscv_info_t));
427 if (!target->arch_info)
429 riscv_info_t *info = (riscv_info_t *) target->arch_info;
430 riscv_info_init(target, info);
431 info->cmd_ctx = cmd_ctx;
/* All DTM register selects share the TAP's IR length. */
433 select_dtmcontrol.num_bits = target->tap->ir_length;
434 select_dbus.num_bits = target->tap->ir_length;
435 select_idcode.num_bits = target->tap->ir_length;
437 if (bscan_tunnel_ir_width != 0) {
438 select_user4.num_bits = target->tap->ir_length;
439 bscan_tunneled_ir_width[0] = bscan_tunnel_ir_width;
/* Patch the DM IR-width field of whichever tunnel flavor is in use. */
440 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
441 bscan_tunnel_data_register_select_dmi[1].num_bits = bscan_tunnel_ir_width;
442 else /* BSCAN_TUNNEL_NESTED_TAP */
443 bscan_tunnel_nested_tap_select_dmi[2].num_bits = bscan_tunnel_ir_width;
446 riscv_semihosting_init(target);
448 target->debug_reason = DBG_REASON_DBGRQ;
/* Free the register cache and every arch_info allocation hanging off it. */
453 static void riscv_free_registers(struct target *target)
455 /* Free the shared structure use for most registers. */
456 if (target->reg_cache) {
457 if (target->reg_cache->reg_list) {
/* reg_list[0].arch_info is one shared allocation covering the first
 * GDB_REGNO_COUNT registers; later registers own their arch_info. */
458 free(target->reg_cache->reg_list[0].arch_info);
459 /* Free the ones we allocated separately. */
460 for (unsigned i = GDB_REGNO_COUNT; i < target->reg_cache->num_regs; i++)
461 free(target->reg_cache->reg_list[i].arch_info);
462 free(target->reg_cache->reg_list);
464 free(target->reg_cache);
/* Tear down in reverse of riscv_init_target: version-specific deinit first,
 * then the register cache and arch_info. */
468 static void riscv_deinit_target(struct target *target)
470 LOG_DEBUG("riscv_deinit_target()");
471 struct target_type *tt = get_target_type(target);
473 tt->deinit_target(target);
474 riscv_info_t *info = (riscv_info_t *) target->arch_info;
475 free(info->reg_names);
479 riscv_free_registers(target);
/* Clear the pointer so later calls see an uninitialized target. */
481 target->arch_info = NULL;
484 static void trigger_from_breakpoint(struct trigger *trigger,
485 const struct breakpoint *breakpoint)
487 trigger->address = breakpoint->address;
488 trigger->length = breakpoint->length;
489 trigger->mask = ~0LL;
490 trigger->read = false;
491 trigger->write = false;
492 trigger->execute = true;
493 /* unique_id is unique across both breakpoints and watchpoints. */
494 trigger->unique_id = breakpoint->unique_id;
/* Program a type-1 (legacy SiFive bpcontrol) trigger from `trigger`, then
 * read tdata1 back to verify the hardware accepted every bit.  Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when the slot is in use or the
 * hardware rejects the configuration. */
497 static int maybe_add_trigger_t1(struct target *target, unsigned hartid,
498 struct trigger *trigger, uint64_t tdata1)
502 const uint32_t bpcontrol_x = 1<<0;
503 const uint32_t bpcontrol_w = 1<<1;
504 const uint32_t bpcontrol_r = 1<<2;
505 const uint32_t bpcontrol_u = 1<<3;
506 const uint32_t bpcontrol_s = 1<<4;
507 const uint32_t bpcontrol_h = 1<<5;
508 const uint32_t bpcontrol_m = 1<<6;
509 const uint32_t bpcontrol_bpmatch = 0xf << 7;
510 const uint32_t bpcontrol_bpaction = 0xff << 11;
512 if (tdata1 & (bpcontrol_r | bpcontrol_w | bpcontrol_x)) {
513 /* Trigger is already in use, presumably by user code. */
514 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
517 tdata1 = set_field(tdata1, bpcontrol_r, trigger->read);
518 tdata1 = set_field(tdata1, bpcontrol_w, trigger->write);
519 tdata1 = set_field(tdata1, bpcontrol_x, trigger->execute);
/* Enable matching only in the privilege modes this hart implements,
 * as advertised by its misa extension bits. */
520 tdata1 = set_field(tdata1, bpcontrol_u,
521 !!(r->misa[hartid] & (1 << ('U' - 'A'))));
522 tdata1 = set_field(tdata1, bpcontrol_s,
523 !!(r->misa[hartid] & (1 << ('S' - 'A'))));
524 tdata1 = set_field(tdata1, bpcontrol_h,
525 !!(r->misa[hartid] & (1 << ('H' - 'A'))));
526 tdata1 |= bpcontrol_m;
527 tdata1 = set_field(tdata1, bpcontrol_bpmatch, 0); /* exact match */
528 tdata1 = set_field(tdata1, bpcontrol_bpaction, 0); /* cause bp exception */
530 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);
532 riscv_reg_t tdata1_rb;
533 if (riscv_get_register_on_hart(target, &tdata1_rb, hartid,
534 GDB_REGNO_TDATA1) != ERROR_OK)
536 LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);
/* Readback mismatch means the hardware rejected some bits; release the
 * trigger so it isn't left half-configured. */
538 if (tdata1 != tdata1_rb) {
539 LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
540 PRIx64 " to tdata1 it contains 0x%" PRIx64,
542 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
543 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
546 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
/* Program a type-2 (mcontrol) address-match trigger from `trigger` and
 * verify by readback.  The caller must already have pointed tselect at the
 * trigger slot to use. */
551 static int maybe_add_trigger_t2(struct target *target, unsigned hartid,
552 struct trigger *trigger, uint64_t tdata1)
556 /* tselect is already set */
557 if (tdata1 & (MCONTROL_EXECUTE | MCONTROL_STORE | MCONTROL_LOAD)) {
558 /* Trigger is already in use, presumably by user code. */
559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
562 /* address/data match trigger */
563 tdata1 |= MCONTROL_DMODE(riscv_xlen(target));
564 tdata1 = set_field(tdata1, MCONTROL_ACTION,
565 MCONTROL_ACTION_DEBUG_MODE);
566 tdata1 = set_field(tdata1, MCONTROL_MATCH, MCONTROL_MATCH_EQUAL);
/* Match in M-mode plus every other privilege mode the hart implements,
 * per its misa extension bits. */
567 tdata1 |= MCONTROL_M;
568 if (r->misa[hartid] & (1 << ('H' - 'A')))
569 tdata1 |= MCONTROL_H;
570 if (r->misa[hartid] & (1 << ('S' - 'A')))
571 tdata1 |= MCONTROL_S;
572 if (r->misa[hartid] & (1 << ('U' - 'A')))
573 tdata1 |= MCONTROL_U;
575 if (trigger->execute)
576 tdata1 |= MCONTROL_EXECUTE;
578 tdata1 |= MCONTROL_LOAD;
580 tdata1 |= MCONTROL_STORE;
582 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);
585 int result = riscv_get_register_on_hart(target, &tdata1_rb, hartid, GDB_REGNO_TDATA1);
586 if (result != ERROR_OK)
588 LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);
/* Readback mismatch: this trigger can't do what we asked; release it. */
590 if (tdata1 != tdata1_rb) {
591 LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
592 PRIx64 " to tdata1 it contains 0x%" PRIx64,
594 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
595 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
598 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
/* Find a free hardware trigger slot and program `trigger` into the same
 * slot on every enabled hart, preserving the RTOS illusion that each hart
 * is a thread on one core.  Saves and restores tselect on every hart. */
603 static int add_trigger(struct target *target, struct trigger *trigger)
607 if (riscv_enumerate_triggers(target) != ERROR_OK)
610 /* In RTOS mode, we need to set the same trigger in the same slot on every
611  * hart, to keep up the illusion that each hart is a thread running on the
614 /* Otherwise, we just set the trigger on the one hart this target deals
617 riscv_reg_t tselect[RISCV_MAX_HARTS];
/* Save each enabled hart's tselect so it can be restored afterwards. */
620 for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
621 if (!riscv_hart_enabled(target, hartid))
625 int result = riscv_get_register_on_hart(target, &tselect[hartid],
626 hartid, GDB_REGNO_TSELECT);
627 if (result != ERROR_OK)
630 assert(first_hart >= 0);
/* Scan the slots for one we haven't already claimed (id == -1). */
633 for (i = 0; i < r->trigger_count[first_hart]; i++) {
634 if (r->trigger_unique_id[i] != -1)
637 riscv_set_register_on_hart(target, first_hart, GDB_REGNO_TSELECT, i);
640 int result = riscv_get_register_on_hart(target, &tdata1, first_hart,
642 if (result != ERROR_OK)
644 int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
647 for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
648 if (!riscv_hart_enabled(target, hartid))
650 if (hartid > first_hart)
651 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
/* Dispatch on the trigger type read from tdata1. */
654 result = maybe_add_trigger_t1(target, hartid, trigger, tdata1);
657 result = maybe_add_trigger_t2(target, hartid, trigger, tdata1);
660 LOG_DEBUG("trigger %d has unknown type %d", i, type);
664 if (result != ERROR_OK)
668 if (result != ERROR_OK)
671 LOG_DEBUG("[%d] Using trigger %d (type %d) for bp %d", target->coreid,
672 i, type, trigger->unique_id);
/* Record the claim so this slot is skipped on the next search. */
673 r->trigger_unique_id[i] = trigger->unique_id;
/* Restore each hart's saved tselect. */
677 for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
678 if (!riscv_hart_enabled(target, hartid))
680 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT,
684 if (i >= r->trigger_count[first_hart]) {
685 LOG_ERROR("Couldn't find an available hardware trigger.");
686 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
/* Install a breakpoint: software breakpoints patch an ebreak/c.ebreak into
 * memory (2- or 4-byte only); hardware breakpoints claim a trigger slot. */
692 int riscv_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
694 LOG_DEBUG("[%d] @0x%" TARGET_PRIxADDR, target->coreid, breakpoint->address);
696 if (breakpoint->type == BKPT_SOFT) {
697 /** @todo check RVC for size/alignment */
698 if (!(breakpoint->length == 4 || breakpoint->length == 2)) {
699 LOG_ERROR("Invalid breakpoint length %d", breakpoint->length);
703 if (0 != (breakpoint->address % 2)) {
704 LOG_ERROR("Invalid breakpoint alignment for address 0x%" TARGET_PRIxADDR, breakpoint->address);
/* Save the original instruction so riscv_remove_breakpoint can restore it. */
708 if (target_read_memory(target, breakpoint->address, 2, breakpoint->length / 2,
709 breakpoint->orig_instr) != ERROR_OK) {
710 LOG_ERROR("Failed to read original instruction at 0x%" TARGET_PRIxADDR,
711 breakpoint->address);
/* Overwrite with ebreak (4-byte) or c.ebreak (2-byte). */
715 uint8_t buff[4] = { 0 };
716 buf_set_u32(buff, 0, breakpoint->length * CHAR_BIT, breakpoint->length == 4 ? ebreak() : ebreak_c());
717 int const retval = target_write_memory(target, breakpoint->address, 2, breakpoint->length / 2, buff);
719 if (retval != ERROR_OK) {
720 LOG_ERROR("Failed to write %d-byte breakpoint instruction at 0x%"
721 TARGET_PRIxADDR, breakpoint->length, breakpoint->address);
725 } else if (breakpoint->type == BKPT_HARD) {
726 struct trigger trigger;
727 trigger_from_breakpoint(&trigger, breakpoint);
728 int const result = add_trigger(target, &trigger);
729 if (result != ERROR_OK)
732 LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
733 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
736 breakpoint->set = true;
/* Release the hardware trigger slot programmed for `trigger`: clear tdata1
 * on every enabled hart (restoring each hart's tselect) and mark the slot
 * free in the bookkeeping array. */
740 static int remove_trigger(struct target *target, struct trigger *trigger)
744 if (riscv_enumerate_triggers(target) != ERROR_OK)
748 for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
749 if (!riscv_hart_enabled(target, hartid))
751 if (first_hart < 0) {
756 assert(first_hart >= 0);
/* Locate the slot by the unique id recorded at add_trigger() time. */
759 for (i = 0; i < r->trigger_count[first_hart]; i++) {
760 if (r->trigger_unique_id[i] == trigger->unique_id)
763 if (i >= r->trigger_count[first_hart]) {
764 LOG_ERROR("Couldn't find the hardware resources used by hardware "
768 LOG_DEBUG("[%d] Stop using resource %d for bp %d", target->coreid, i,
770 for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
771 if (!riscv_hart_enabled(target, hartid))
774 int result = riscv_get_register_on_hart(target, &tselect, hartid, GDB_REGNO_TSELECT);
775 if (result != ERROR_OK)
/* Select the slot, disable it by zeroing tdata1, restore tselect. */
777 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
778 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
779 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
781 r->trigger_unique_id[i] = -1;
/* Remove a breakpoint installed by riscv_add_breakpoint: restore the saved
 * original instruction (soft) or free the trigger slot (hard). */
786 int riscv_remove_breakpoint(struct target *target,
787 struct breakpoint *breakpoint)
789 if (breakpoint->type == BKPT_SOFT) {
790 if (target_write_memory(target, breakpoint->address, 2, breakpoint->length / 2,
791 breakpoint->orig_instr) != ERROR_OK) {
792 LOG_ERROR("Failed to restore instruction for %d-byte breakpoint at "
793 "0x%" TARGET_PRIxADDR, breakpoint->length, breakpoint->address);
797 } else if (breakpoint->type == BKPT_HARD) {
798 struct trigger trigger;
799 trigger_from_breakpoint(&trigger, breakpoint);
800 int result = remove_trigger(target, &trigger);
801 if (result != ERROR_OK)
805 LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
806 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
809 breakpoint->set = false;
814 static void trigger_from_watchpoint(struct trigger *trigger,
815 const struct watchpoint *watchpoint)
817 trigger->address = watchpoint->address;
818 trigger->length = watchpoint->length;
819 trigger->mask = watchpoint->mask;
820 trigger->value = watchpoint->value;
821 trigger->read = (watchpoint->rw == WPT_READ || watchpoint->rw == WPT_ACCESS);
822 trigger->write = (watchpoint->rw == WPT_WRITE || watchpoint->rw == WPT_ACCESS);
823 trigger->execute = false;
824 /* unique_id is unique across both breakpoints and watchpoints. */
825 trigger->unique_id = watchpoint->unique_id;
/* Install a watchpoint by programming a hardware trigger slot. */
828 int riscv_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
830 struct trigger trigger;
831 trigger_from_watchpoint(&trigger, watchpoint);
833 int result = add_trigger(target, &trigger);
834 if (result != ERROR_OK)
836 watchpoint->set = true;
/* Remove a watchpoint by releasing its hardware trigger slot. */
841 int riscv_remove_watchpoint(struct target *target,
842 struct watchpoint *watchpoint)
844 LOG_DEBUG("[%d] @0x%" TARGET_PRIxADDR, target->coreid, watchpoint->address);
846 struct trigger trigger;
847 trigger_from_watchpoint(&trigger, watchpoint);
849 int result = remove_trigger(target, &trigger);
850 if (result != ERROR_OK)
852 watchpoint->set = false;
857 /* Sets *hit_watchpoint to the first watchpoint identified as causing the
860  * The GDB server uses this information to tell GDB what data address has
861  * been hit, which enables GDB to print the hit variable along with its old
863 int riscv_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
865 struct watchpoint *wp = target->watchpoints;
867 if (riscv_rtos_enabled(target))
868 riscv_set_current_hartid(target, target->rtos->current_thread - 1);
869 LOG_DEBUG("Current hartid = %d", riscv_current_hartid(target))
871 /*TODO instead of disassembling the instruction that we think caused the
872  * trigger, check the hit bit of each watchpoint first. The hit bit is
873  * simpler and more reliable to check but as it is optional and relatively
874  * new, not all hardware will implement it */
876 riscv_get_register(target, &dpc, GDB_REGNO_DPC);
877 const uint8_t length = 4;
878 LOG_DEBUG("dpc is 0x%" PRIx64, dpc);
880 /* fetch the instruction at dpc */
881 uint8_t buffer[length];
882 if (target_read_buffer(target, dpc, length, buffer) != ERROR_OK) {
883 LOG_ERROR("Failed to read instruction at dpc 0x%" PRIx64, dpc);
/* Assemble the little-endian instruction bytes into a host-order word. */
887 uint32_t instruction = 0;
889 for (int i = 0; i < length; i++) {
890 LOG_DEBUG("Next byte is %x", buffer[i]);
891 instruction += (buffer[i] << 8 * i);
893 LOG_DEBUG("Full instruction is %x", instruction);
895 /* find out which memory address is accessed by the instruction at dpc */
896 /* opcode is first 7 bits of the instruction */
897 uint8_t opcode = instruction & 0x7F;
900 riscv_reg_t mem_addr;
/* NOTE(review): comparing the masked 7-bit opcode against MATCH_LB /
 * MATCH_SB relies on those encoding constants having zero funct3/register
 * bits, so this matches any RV32I load/store - confirm with encoding.h. */
902 if (opcode == MATCH_LB || opcode == MATCH_SB) {
903 rs1 = (instruction & 0xf8000) >> 15;
904 riscv_get_register(target, &mem_addr, rs1);
906 if (opcode == MATCH_SB) {
907 LOG_DEBUG("%x is store instruction", instruction);
/* S-type immediate: imm[4:0] from bits 11:7, imm[11:5] from bits 31:25. */
908 imm = ((instruction & 0xf80) >> 7) | ((instruction & 0xfe000000) >> 20);
910 LOG_DEBUG("%x is load instruction", instruction);
/* I-type immediate: imm[11:0] from bits 31:20. */
911 imm = (instruction & 0xfff00000) >> 20;
913 /* sign extend 12-bit imm to 16-bits */
917 LOG_DEBUG("memory address=0x%" PRIx64, mem_addr);
919 LOG_DEBUG("%x is not a RV32I load or store", instruction);
/* Compare the computed data address against each watchpoint. */
924 /*TODO support length/mask */
925 if (wp->address == mem_addr) {
926 *hit_watchpoint = wp;
927 LOG_DEBUG("Hit address=%" TARGET_PRIxADDR, wp->address);
933 /* No match found - either we hit a watchpoint caused by an instruction that
934  * this function does not yet disassemble, or we hit a breakpoint.
936  * OpenOCD will behave as if this function had never been implemented i.e.
937  * report the halt to GDB with no address information. */
/* Delegate a single step to the version-specific (legacy) driver. */
942 static int oldriscv_step(struct target *target, int current, uint32_t address,
943 int handle_breakpoints)
945 struct target_type *tt = get_target_type(target);
946 return tt->step(target, current, address, handle_breakpoints);
/* Route a step request to either the legacy driver or the generic
 * riscv_openocd_step path. */
949 static int old_or_new_riscv_step(struct target *target, int current,
950 target_addr_t address, int handle_breakpoints)
953 LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
955 return oldriscv_step(target, current, address, handle_breakpoints);
957 return riscv_openocd_step(target, current, address, handle_breakpoints);
/* First-contact examination: read dtmcontrol, record the DTM spec version,
 * then hand off to the matching version-specific init_target/examine. */
961 static int riscv_examine(struct target *target)
963 LOG_DEBUG("riscv_examine()");
964 if (target_was_examined(target)) {
965 LOG_DEBUG("Target was already examined.");
969 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
971 riscv_info_t *info = (riscv_info_t *) target->arch_info;
972 uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
973 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
/* The version field selects which target_type driver we use from now on. */
974 info->dtm_version = get_field(dtmcontrol, DTMCONTROL_VERSION);
975 LOG_DEBUG(" version=0x%x", info->dtm_version);
977 struct target_type *tt = get_target_type(target);
981 int result = tt->init_target(info->cmd_ctx, target);
982 if (result != ERROR_OK)
985 return tt->examine(target);
/* Delegate poll to the version-specific (legacy) driver. */
988 static int oldriscv_poll(struct target *target)
990 struct target_type *tt = get_target_type(target);
991 return tt->poll(target);
/* Route poll to either the legacy driver or the generic riscv_openocd_poll. */
994 static int old_or_new_riscv_poll(struct target *target)
998 return oldriscv_poll(target);
1000 return riscv_openocd_poll(target);
/* Halt phase 1: run the version-specific halt_prep on every enabled hart
 * that is not already halted. */
1003 int halt_prep(struct target *target)
1006 for (int i = 0; i < riscv_count_harts(target); ++i) {
1007 if (!riscv_hart_enabled(target, i))
1010 LOG_DEBUG("[%s] prep hart, debug_reason=%d", target_name(target),
1011 target->debug_reason);
1012 if (riscv_set_current_hartid(target, i) != ERROR_OK)
1014 if (riscv_is_halted(target)) {
1015 LOG_DEBUG("Hart %d is already halted (reason=%d).", i,
1016 target->debug_reason);
1018 if (r->halt_prep(target) != ERROR_OK)
/* Halt phase 2: actually halt every enabled hart that isn't already halted,
 * then invalidate the register cache since hart state has changed. */
1026 int riscv_halt_go_all_harts(struct target *target)
1029 for (int i = 0; i < riscv_count_harts(target); ++i) {
1030 if (!riscv_hart_enabled(target, i))
1033 if (riscv_set_current_hartid(target, i) != ERROR_OK)
1035 if (riscv_is_halted(target)) {
1036 LOG_DEBUG("Hart %d is already halted.", i);
1038 if (r->halt_go(target) != ERROR_OK)
1043 riscv_invalidate_register_cache(target);
/* Halt this target: legacy drivers (no is_halted hook) go through tt->halt;
 * otherwise halt all harts.  Updates state/debug_reason on success. */
1048 int halt_go(struct target *target)
1050 riscv_info_t *r = riscv_info(target);
1052 if (!r->is_halted) {
1053 struct target_type *tt = get_target_type(target);
1054 result = tt->halt(target);
1056 result = riscv_halt_go_all_harts(target);
1058 target->state = TARGET_HALTED;
1059 if (target->debug_reason == DBG_REASON_NOTHALTED)
1060 target->debug_reason = DBG_REASON_DBGRQ;
/* Halt phase 3: notify registered event callbacks that we halted. */
1065 static int halt_finish(struct target *target)
1067 return target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Top-level halt entry point.  Legacy drivers delegate directly to tt->halt;
 * otherwise the prep/go/finish phases run across the whole SMP group (or
 * just this target), and in RTOS mode the RTOS current thread is pointed at
 * the halted hart. */
1070 int riscv_halt(struct target *target)
1074 if (!r->is_halted) {
1075 struct target_type *tt = get_target_type(target);
1076 return tt->halt(target);
1079 LOG_DEBUG("[%d] halting all harts", target->coreid);
1081 int result = ERROR_OK;
/* SMP: run each phase across all targets so harts stop near-simultaneously. */
1083 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1084 struct target *t = tlist->target;
1085 if (halt_prep(t) != ERROR_OK)
1086 result = ERROR_FAIL;
1089 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1090 struct target *t = tlist->target;
1091 riscv_info_t *i = riscv_info(t);
1093 if (halt_go(t) != ERROR_OK)
1094 result = ERROR_FAIL;
1098 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1099 struct target *t = tlist->target;
1100 if (halt_finish(t) != ERROR_OK)
/* Non-SMP: same three phases on just this target. */
1105 if (halt_prep(target) != ERROR_OK)
1106 result = ERROR_FAIL;
1107 if (halt_go(target) != ERROR_OK)
1108 result = ERROR_FAIL;
1109 if (halt_finish(target) != ERROR_OK)
1113 if (riscv_rtos_enabled(target)) {
1114 if (r->rtos_hartid != -1) {
1115 LOG_DEBUG("halt requested on RTOS hartid %d", r->rtos_hartid);
/* RTOS thread ids are hartid + 1 (thread id 0 is reserved). */
1116 target->rtos->current_threadid = r->rtos_hartid + 1;
1117 target->rtos->current_thread = r->rtos_hartid + 1;
1119 LOG_DEBUG("halt requested, but no known RTOS hartid");
/* Assert reset: invalidate the register cache first, since all cached hart
 * state becomes stale, then delegate to the version-specific driver. */
1125 static int riscv_assert_reset(struct target *target)
1127 LOG_DEBUG("[%d]", target->coreid);
1128 struct target_type *tt = get_target_type(target);
1129 riscv_invalidate_register_cache(target);
1130 return tt->assert_reset(target);
/* Deassert reset: pure delegation to the version-specific driver. */
1133 static int riscv_deassert_reset(struct target *target)
1135 LOG_DEBUG("[%d]", target->coreid);
1136 struct target_type *tt = get_target_type(target);
1137 return tt->deassert_reset(target);
/* Run the version-specific resume_prep on every enabled hart that is
 * currently halted; harts already running are skipped with a debug note. */
1140 int riscv_resume_prep_all_harts(struct target *target)
1143 for (int i = 0; i < riscv_count_harts(target); ++i) {
1144 if (!riscv_hart_enabled(target, i))
1147 LOG_DEBUG("prep hart %d", i);
1148 if (riscv_set_current_hartid(target, i) != ERROR_OK)
1150 if (riscv_is_halted(target)) {
1151 if (r->resume_prep(target) != ERROR_OK)
1154 LOG_DEBUG(" hart %d requested resume, but was already resumed", i);
1158 LOG_DEBUG("[%d] mark as prepped", target->coreid);
1164 /* state must be riscv_reg_t state[RISCV_MAX_HWBPS] = {0}; */
/* Temporarily disable all triggers so the hart can step off one, saving the
 * per-trigger state into `state` for a later enable_triggers() call. */
1165 static int disable_triggers(struct target *target, riscv_reg_t *state)
1169 LOG_DEBUG("deal with triggers");
1171 if (riscv_enumerate_triggers(target) != ERROR_OK)
1174 int hartid = riscv_current_hartid(target);
1175 if (r->manual_hwbp_set) {
1176 /* Look at every trigger that may have been set. */
1177 riscv_reg_t tselect;
1178 if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
1180 for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
1181 if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
1184 if (riscv_get_register(target, &tdata1, GDB_REGNO_TDATA1) != ERROR_OK)
/* Only debug-mode-owned triggers (dmode set) are ours to clear. */
1186 if (tdata1 & MCONTROL_DMODE(riscv_xlen(target))) {
1188 if (riscv_set_register(target, GDB_REGNO_TDATA1, 0) != ERROR_OK)
1192 if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
1196 /* Just go through the triggers we manage. */
1197 struct watchpoint *watchpoint = target->watchpoints;
1199 while (watchpoint) {
1200 LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
/* Remember whether it was set so enable_triggers can restore it. */
1201 state[i] = watchpoint->set;
1202 if (watchpoint->set) {
1203 if (riscv_remove_watchpoint(target, watchpoint) != ERROR_OK)
1206 watchpoint = watchpoint->next;
/* Re-enable the triggers previously saved and cleared by disable_triggers(),
 * using the same `state` array. */
1214 static int enable_triggers(struct target *target, riscv_reg_t *state)
1218 int hartid = riscv_current_hartid(target);
1220 if (r->manual_hwbp_set) {
1221 /* Look at every trigger that may have been set. */
1222 riscv_reg_t tselect;
1223 if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
1225 for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
1226 if (state[t] != 0) {
/* Restore the saved tdata1 value into trigger slot t. */
1227 if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
1229 if (riscv_set_register(target, GDB_REGNO_TDATA1, state[t]) != ERROR_OK)
1233 if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
/* Managed-watchpoint path: re-add each watchpoint that had been set. */
1237 struct watchpoint *watchpoint = target->watchpoints;
1239 while (watchpoint) {
1240 LOG_DEBUG("watchpoint %d: cleared=%" PRId64, i, state[i]);
1242 if (riscv_add_watchpoint(target, watchpoint) != ERROR_OK)
1245 watchpoint = watchpoint->next;
 * Get everything ready to resume.
static int resume_prep(struct target *target, int current,
		target_addr_t address, int handle_breakpoints, int debug_execution)
	LOG_DEBUG("[%d]", target->coreid);
	/* If not resuming from the current PC, set the requested address first. */
	riscv_set_register(target, GDB_REGNO_PC, address);
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		/* To be able to run off a trigger, disable all the triggers, step, and
		 * then resume as usual. */
		riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
		if (disable_triggers(target, trigger_state) != ERROR_OK)
		if (old_or_new_riscv_step(target, true, 0, false) != ERROR_OK)
		if (enable_triggers(target, trigger_state) != ERROR_OK)
	if (riscv_resume_prep_all_harts(target) != ERROR_OK)
	LOG_DEBUG("[%d] mark as prepped", target->coreid);
 * Resume all the harts that have been prepped, as close to instantaneous as
static int resume_go(struct target *target, int current,
		target_addr_t address, int handle_breakpoints, int debug_execution)
	riscv_info_t *r = riscv_info(target);
	/* 0.11 targets (is_halted unset) delegate to the version-specific resume. */
	if (!r->is_halted) {
		struct target_type *tt = get_target_type(target);
		result = tt->resume(target, current, address, handle_breakpoints,
	result = riscv_resume_go_all_harts(target);
/* Finalize a resume: drop cached registers and notify event listeners. */
static int resume_finish(struct target *target)
	/* Cached register values are stale once the hart is running. */
	register_cache_invalidate(target->reg_cache);
	target->state = TARGET_RUNNING;
	target->debug_reason = DBG_REASON_NOTHALTED;
	return target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
 * @par single_hart When true, only resume a single hart even if SMP is
 * configured. This is used to run algorithms on just one hart.
		struct target *target,
		target_addr_t address,
		int handle_breakpoints,
		int debug_execution,
	LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
	int result = ERROR_OK;
	if (target->smp && !single_hart) {
		/* Three passes over the SMP group: prep, go, finish. Keeping the
		 * "go" phase separate makes the group resume close to simultaneous. */
		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
			struct target *t = tlist->target;
			if (resume_prep(t, current, address, handle_breakpoints,
						debug_execution) != ERROR_OK)
				result = ERROR_FAIL;
		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
			struct target *t = tlist->target;
			riscv_info_t *i = riscv_info(t);
			if (resume_go(t, current, address, handle_breakpoints,
						debug_execution) != ERROR_OK)
				result = ERROR_FAIL;
		for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
			struct target *t = tlist->target;
			if (resume_finish(t) != ERROR_OK)
	/* Non-SMP (or single_hart) path: same three phases on one target. */
	if (resume_prep(target, current, address, handle_breakpoints,
				debug_execution) != ERROR_OK)
		result = ERROR_FAIL;
	if (resume_go(target, current, address, handle_breakpoints,
				debug_execution) != ERROR_OK)
		result = ERROR_FAIL;
	if (resume_finish(target) != ERROR_OK)
/* target_type resume hook: full-group resume (single_hart = false). */
static int riscv_target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
	return riscv_resume(target, current, address, handle_breakpoints,
			debug_execution, false);
/* Make the hart that OpenOCD should operate on the current one: the RTOS
 * thread's hart when -rtos riscv is configured, else this target's coreid. */
static int riscv_select_current_hart(struct target *target)
	if (riscv_rtos_enabled(target)) {
		/* rtos_hartid == -1 means "not chosen yet"; derive it from the
		 * current GDB thread id (threads are 1-based, harts 0-based). */
		if (r->rtos_hartid == -1)
			r->rtos_hartid = target->rtos->current_threadid - 1;
		return riscv_set_current_hartid(target, r->rtos_hartid);
	return riscv_set_current_hartid(target, target->coreid);
/* Report through *enabled whether address translation is active on the
 * current hart (considering privilege mode, mstatus.MPRV and satp.MODE). */
static int riscv_mmu(struct target *target, int *enabled)
	/* virt2phys translation can be globally disabled by the user. */
	if (!riscv_enable_virt2phys) {
	if (riscv_rtos_enabled(target))
		riscv_set_current_hartid(target, target->rtos->current_thread - 1);
	/* Don't use MMU in explicit or effective M (machine) mode */
	if (riscv_get_register(target, &priv, GDB_REGNO_PRIV) != ERROR_OK) {
		LOG_ERROR("Failed to read priv register.");
	riscv_reg_t mstatus;
	if (riscv_get_register(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) {
		LOG_ERROR("Failed to read mstatus register.");
	/* Effective privilege for loads/stores is MPP when MPRV is set. */
	if ((get_field(mstatus, MSTATUS_MPRV) ? get_field(mstatus, MSTATUS_MPP) : priv) == PRV_M) {
		LOG_DEBUG("SATP/MMU ignored in Machine mode (mstatus=0x%" PRIx64 ").", mstatus);
	if (riscv_get_register(target, &satp, GDB_REGNO_SATP) != ERROR_OK) {
		LOG_DEBUG("Couldn't read SATP.");
		/* If we can't read SATP, then there must not be an MMU. */
	if (get_field(satp, RISCV_SATP_MODE(riscv_xlen(target))) == SATP_MODE_OFF) {
		LOG_DEBUG("MMU is disabled.");
	LOG_DEBUG("MMU is enabled.");
/* Walk the page tables to translate `virtual` into *physical, following the
 * Sv32/Sv39/Sv48 scheme selected by satp.MODE. Returns ERROR_OK on success. */
static int riscv_address_translate(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
	riscv_reg_t satp_value;
	target_addr_t table_address;
	virt2phys_info_t *info;
	if (riscv_rtos_enabled(target))
		riscv_set_current_hartid(target, target->rtos->current_thread - 1);
	int result = riscv_get_register(target, &satp_value, GDB_REGNO_SATP);
	if (result != ERROR_OK)
	unsigned xlen = riscv_xlen(target);
	/* Pick the per-mode walk parameters (levels, VPN/PPN masks and shifts). */
	mode = get_field(satp_value, RISCV_SATP_MODE(xlen));
		case SATP_MODE_SV32:
		case SATP_MODE_SV39:
		case SATP_MODE_SV48:
			/* Bare mode: no translation to perform. */
			LOG_ERROR("No translation or protection." \
					" (satp: 0x%" PRIx64 ")", satp_value);
			LOG_ERROR("The translation mode is not supported." \
					" (satp: 0x%" PRIx64 ")", satp_value);
	LOG_DEBUG("virtual=0x%" TARGET_PRIxADDR "; mode=%s", virtual, info->name);
	/* verify bits xlen-1:va_bits-1 are all equal */
	target_addr_t mask = ((target_addr_t)1 << (xlen - (info->va_bits - 1))) - 1;
	target_addr_t masked_msbs = (virtual >> (info->va_bits - 1)) & mask;
	if (masked_msbs != 0 && masked_msbs != mask) {
		LOG_ERROR("Virtual address 0x%" TARGET_PRIxADDR " is not sign-extended "
				"for %s mode.", virtual, info->name);
	/* Root page table is at satp.PPN << page shift. */
	ppn_value = get_field(satp_value, RISCV_SATP_PPN(xlen));
	table_address = ppn_value << RISCV_PGSHIFT;
	/* Walk from the top level (info->level - 1) down towards level 0. */
	i = info->level - 1;
		uint64_t vpn = virtual >> info->vpn_shift[i];
		vpn &= info->vpn_mask[i];
		target_addr_t pte_address = table_address +
				(vpn << info->pte_shift);
		assert(info->pte_shift <= 3);
		/* Read one PTE (4 or 8 bytes depending on pte_shift). */
		int retval = r->read_memory(target, pte_address,
				4, (1 << info->pte_shift) / 4, buffer, 4);
		if (retval != ERROR_OK)
		if (info->pte_shift == 2)
			pte = buf_get_u32(buffer, 0, 32);
			pte = buf_get_u64(buffer, 0, 64);
		LOG_DEBUG("i=%d; PTE @0x%" TARGET_PRIxADDR " = 0x%" PRIx64, i,
		/* Invalid PTE, or the reserved W-without-R encoding: fail the walk. */
		if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W)))
		if ((pte & PTE_R) || (pte & PTE_X)) /* Found leaf PTE. */
		/* Otherwise the PTE points at the next-level table; descend. */
		ppn_value = pte >> PTE_PPN_SHIFT;
		table_address = ppn_value << RISCV_PGSHIFT;
	LOG_ERROR("Couldn't find the PTE.");
	/* Make sure to clear out the high bits that may be set. */
	*physical = virtual & (((target_addr_t)1 << info->va_bits) - 1);
	/* Replace the translated PPN fields; levels below the leaf (superpage)
	 * keep their offset bits from the virtual address. */
	while (i < info->level) {
		ppn_value = pte >> info->pte_ppn_shift[i];
		ppn_value &= info->pte_ppn_mask[i];
		*physical &= ~(((target_addr_t)info->pa_ppn_mask[i]) <<
				info->pa_ppn_shift[i]);
		*physical |= (ppn_value << info->pa_ppn_shift[i]);
	LOG_DEBUG("0x%" TARGET_PRIxADDR " -> 0x%" TARGET_PRIxADDR, virtual,
/* target_type virt2phys hook: translate only when the MMU is enabled. */
static int riscv_virt2phys(struct target *target, target_addr_t virtual, target_addr_t *physical)
	if (riscv_mmu(target, &enabled) == ERROR_OK) {
		if (riscv_address_translate(target, virtual, physical) == ERROR_OK)
/* Read physical memory via the version-specific read_memory implementation. */
static int riscv_read_phys_memory(struct target *target, target_addr_t phys_address,
		uint32_t size, uint32_t count, uint8_t *buffer)
	if (riscv_select_current_hart(target) != ERROR_OK)
	/* Final argument is the increment between elements (== size here). */
	return r->read_memory(target, phys_address, size, count, buffer, size);
/* Read virtual memory: translate the address if possible, then read. */
static int riscv_read_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
	/* A zero-length read is a no-op; warn so callers can be fixed. */
	LOG_WARNING("0-length read from 0x%" TARGET_PRIxADDR, address);
	if (riscv_select_current_hart(target) != ERROR_OK)
	target_addr_t physical_addr;
	/* Best effort: if translation fails we fall through with the original
	 * address rather than erroring out. */
	if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
		address = physical_addr;
	return r->read_memory(target, address, size, count, buffer, size);
/* Write physical memory through the version-specific target_type hook. */
static int riscv_write_phys_memory(struct target *target, target_addr_t phys_address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
	if (riscv_select_current_hart(target) != ERROR_OK)
	struct target_type *tt = get_target_type(target);
	return tt->write_memory(target, phys_address, size, count, buffer);
/* Write virtual memory: translate the address if possible, then write. */
static int riscv_write_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
	/* A zero-length write is a no-op; warn so callers can be fixed. */
	LOG_WARNING("0-length write to 0x%" TARGET_PRIxADDR, address);
	if (riscv_select_current_hart(target) != ERROR_OK)
	target_addr_t physical_addr;
	/* Best effort translation, mirroring riscv_read_memory(). */
	if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
		address = physical_addr;
	struct target_type *tt = get_target_type(target);
	return tt->write_memory(target, address, size, count, buffer);
/* Return the GDB architecture string matching this target's XLEN. */
const char *riscv_get_gdb_arch(struct target *target)
	switch (riscv_xlen(target)) {
			return "riscv:rv32";
			return "riscv:rv64";
	LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
/* Build the register list GDB asks for. When `read` is true, registers that
 * exist but are not cached are fetched from the target first. */
static int riscv_get_gdb_reg_list_internal(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class, bool read)
	LOG_DEBUG("rtos_hartid=%d, current_hartid=%d, reg_class=%d, read=%d",
			r->rtos_hartid, r->current_hartid, reg_class, read);
	if (!target->reg_cache) {
		LOG_ERROR("Target not initialized. Return ERROR_FAIL.");
	if (riscv_select_current_hart(target) != ERROR_OK)
	switch (reg_class) {
		case REG_CLASS_GENERAL:
			/* 32 GPRs + PC. */
			*reg_list_size = 33;
			*reg_list_size = target->reg_cache->num_regs;
			LOG_ERROR("Unsupported reg_class: %d", reg_class);
	/* Caller owns the returned array (the reg structs stay in the cache). */
	*reg_list = calloc(*reg_list_size, sizeof(struct reg *));
	for (int i = 0; i < *reg_list_size; i++) {
		assert(!target->reg_cache->reg_list[i].valid ||
				target->reg_cache->reg_list[i].size > 0);
		(*reg_list)[i] = &target->reg_cache->reg_list[i];
		/* Only read registers that exist and are not already cached. */
				target->reg_cache->reg_list[i].exist &&
				!target->reg_cache->reg_list[i].valid) {
			if (target->reg_cache->reg_list[i].type->get(
						&target->reg_cache->reg_list[i]) != ERROR_OK)
/* GDB register-list hook that does not read values from the target. */
static int riscv_get_gdb_reg_list_noread(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
	return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
/* GDB register-list hook that also fetches uncached register values. */
static int riscv_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
	return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
/* Delegate arch_state reporting to the version-specific target_type. */
static int riscv_arch_state(struct target *target)
	struct target_type *tt = get_target_type(target);
	return tt->arch_state(target);
1699 /* Algorithm must end with a software breakpoint instruction. */
1700 static int riscv_run_algorithm(struct target *target, int num_mem_params,
1701 struct mem_param *mem_params, int num_reg_params,
1702 struct reg_param *reg_params, target_addr_t entry_point,
1703 target_addr_t exit_point, int timeout_ms, void *arch_info)
1705 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1706 int hartid = riscv_current_hartid(target);
1708 if (num_mem_params > 0) {
1709 LOG_ERROR("Memory parameters are not supported for RISC-V algorithms.");
1713 if (target->state != TARGET_HALTED) {
1714 LOG_WARNING("target not halted");
1715 return ERROR_TARGET_NOT_HALTED;
1718 /* Save registers */
1719 struct reg *reg_pc = register_get_by_name(target->reg_cache, "pc", true);
1720 if (!reg_pc || reg_pc->type->get(reg_pc) != ERROR_OK)
1722 uint64_t saved_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
1723 LOG_DEBUG("saved_pc=0x%" PRIx64, saved_pc);
1725 uint64_t saved_regs[32];
1726 for (int i = 0; i < num_reg_params; i++) {
1727 LOG_DEBUG("save %s", reg_params[i].reg_name);
1728 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, false);
1730 LOG_ERROR("Couldn't find register named '%s'", reg_params[i].reg_name);
1734 if (r->size != reg_params[i].size) {
1735 LOG_ERROR("Register %s is %d bits instead of %d bits.",
1736 reg_params[i].reg_name, r->size, reg_params[i].size);
1740 if (r->number > GDB_REGNO_XPR31) {
1741 LOG_ERROR("Only GPRs can be use as argument registers.");
1745 if (r->type->get(r) != ERROR_OK)
1747 saved_regs[r->number] = buf_get_u64(r->value, 0, r->size);
1749 if (reg_params[i].direction == PARAM_OUT || reg_params[i].direction == PARAM_IN_OUT) {
1750 if (r->type->set(r, reg_params[i].value) != ERROR_OK)
1756 /* Disable Interrupts before attempting to run the algorithm. */
1757 uint64_t current_mstatus;
1758 uint8_t mstatus_bytes[8] = { 0 };
1760 LOG_DEBUG("Disabling Interrupts");
1761 struct reg *reg_mstatus = register_get_by_name(target->reg_cache,
1764 LOG_ERROR("Couldn't find mstatus!");
1768 reg_mstatus->type->get(reg_mstatus);
1769 current_mstatus = buf_get_u64(reg_mstatus->value, 0, reg_mstatus->size);
1770 uint64_t ie_mask = MSTATUS_MIE | MSTATUS_HIE | MSTATUS_SIE | MSTATUS_UIE;
1771 buf_set_u64(mstatus_bytes, 0, info->xlen[0], set_field(current_mstatus,
1774 reg_mstatus->type->set(reg_mstatus, mstatus_bytes);
1777 LOG_DEBUG("resume at 0x%" TARGET_PRIxADDR, entry_point);
1778 if (riscv_resume(target, 0, entry_point, 0, 0, true) != ERROR_OK)
1781 int64_t start = timeval_ms();
1782 while (target->state != TARGET_HALTED) {
1783 LOG_DEBUG("poll()");
1784 int64_t now = timeval_ms();
1785 if (now - start > timeout_ms) {
1786 LOG_ERROR("Algorithm timed out after %" PRId64 " ms.", now - start);
1788 old_or_new_riscv_poll(target);
1789 enum gdb_regno regnums[] = {
1790 GDB_REGNO_RA, GDB_REGNO_SP, GDB_REGNO_GP, GDB_REGNO_TP,
1791 GDB_REGNO_T0, GDB_REGNO_T1, GDB_REGNO_T2, GDB_REGNO_FP,
1792 GDB_REGNO_S1, GDB_REGNO_A0, GDB_REGNO_A1, GDB_REGNO_A2,
1793 GDB_REGNO_A3, GDB_REGNO_A4, GDB_REGNO_A5, GDB_REGNO_A6,
1794 GDB_REGNO_A7, GDB_REGNO_S2, GDB_REGNO_S3, GDB_REGNO_S4,
1795 GDB_REGNO_S5, GDB_REGNO_S6, GDB_REGNO_S7, GDB_REGNO_S8,
1796 GDB_REGNO_S9, GDB_REGNO_S10, GDB_REGNO_S11, GDB_REGNO_T3,
1797 GDB_REGNO_T4, GDB_REGNO_T5, GDB_REGNO_T6,
1799 GDB_REGNO_MSTATUS, GDB_REGNO_MEPC, GDB_REGNO_MCAUSE,
1801 for (unsigned i = 0; i < ARRAY_SIZE(regnums); i++) {
1802 enum gdb_regno regno = regnums[i];
1803 riscv_reg_t reg_value;
1804 if (riscv_get_register(target, ®_value, regno) != ERROR_OK)
1806 LOG_ERROR("%s = 0x%" PRIx64, gdb_regno_name(regno), reg_value);
1808 return ERROR_TARGET_TIMEOUT;
1811 int result = old_or_new_riscv_poll(target);
1812 if (result != ERROR_OK)
1816 /* The current hart id might have been changed in poll(). */
1817 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
1820 if (reg_pc->type->get(reg_pc) != ERROR_OK)
1822 uint64_t final_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
1823 if (exit_point && final_pc != exit_point) {
1824 LOG_ERROR("PC ended up at 0x%" PRIx64 " instead of 0x%"
1825 TARGET_PRIxADDR, final_pc, exit_point);
1829 /* Restore Interrupts */
1830 LOG_DEBUG("Restoring Interrupts");
1831 buf_set_u64(mstatus_bytes, 0, info->xlen[0], current_mstatus);
1832 reg_mstatus->type->set(reg_mstatus, mstatus_bytes);
1834 /* Restore registers */
1835 uint8_t buf[8] = { 0 };
1836 buf_set_u64(buf, 0, info->xlen[0], saved_pc);
1837 if (reg_pc->type->set(reg_pc, buf) != ERROR_OK)
1840 for (int i = 0; i < num_reg_params; i++) {
1841 if (reg_params[i].direction == PARAM_IN ||
1842 reg_params[i].direction == PARAM_IN_OUT) {
1843 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, false);
1844 if (r->type->get(r) != ERROR_OK) {
1845 LOG_ERROR("get(%s) failed", r->name);
1848 buf_cpy(r->value, reg_params[i].value, reg_params[i].size);
1850 LOG_DEBUG("restore %s", reg_params[i].reg_name);
1851 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, false);
1852 buf_set_u64(buf, 0, info->xlen[0], saved_regs[r->number]);
1853 if (r->type->set(r, buf) != ERROR_OK) {
1854 LOG_ERROR("set(%s) failed", r->name);
1862 static int riscv_checksum_memory(struct target *target,
1863 target_addr_t address, uint32_t count,
1866 struct working_area *crc_algorithm;
1867 struct reg_param reg_params[2];
1870 LOG_DEBUG("address=0x%" TARGET_PRIxADDR "; count=0x%" PRIx32, address, count);
1872 static const uint8_t riscv32_crc_code[] = {
1873 #include "../../contrib/loaders/checksum/riscv32_crc.inc"
1875 static const uint8_t riscv64_crc_code[] = {
1876 #include "../../contrib/loaders/checksum/riscv64_crc.inc"
1879 static const uint8_t *crc_code;
1881 unsigned xlen = riscv_xlen(target);
1882 unsigned crc_code_size;
1884 crc_code = riscv32_crc_code;
1885 crc_code_size = sizeof(riscv32_crc_code);
1887 crc_code = riscv64_crc_code;
1888 crc_code_size = sizeof(riscv64_crc_code);
1891 if (count < crc_code_size * 4) {
1892 /* Don't use the algorithm for relatively small buffers. It's faster
1893 * just to read the memory. target_checksum_memory() will take care of
1894 * that if we fail. */
1898 retval = target_alloc_working_area(target, crc_code_size, &crc_algorithm);
1899 if (retval != ERROR_OK)
1902 if (crc_algorithm->address + crc_algorithm->size > address &&
1903 crc_algorithm->address < address + count) {
1904 /* Region to checksum overlaps with the work area we've been assigned.
1905 * Bail. (Would be better to manually checksum what we read there, and
1906 * use the algorithm for the rest.) */
1907 target_free_working_area(target, crc_algorithm);
1911 retval = target_write_buffer(target, crc_algorithm->address, crc_code_size,
1913 if (retval != ERROR_OK) {
1914 LOG_ERROR("Failed to write code to " TARGET_ADDR_FMT ": %d",
1915 crc_algorithm->address, retval);
1916 target_free_working_area(target, crc_algorithm);
1920 init_reg_param(®_params[0], "a0", xlen, PARAM_IN_OUT);
1921 init_reg_param(®_params[1], "a1", xlen, PARAM_OUT);
1922 buf_set_u64(reg_params[0].value, 0, xlen, address);
1923 buf_set_u64(reg_params[1].value, 0, xlen, count);
1925 /* 20 second timeout/megabyte */
1926 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1928 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1929 crc_algorithm->address,
1930 0, /* Leave exit point unspecified because we don't know. */
1933 if (retval == ERROR_OK)
1934 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1936 LOG_ERROR("error executing RISC-V CRC algorithm");
1938 destroy_reg_param(®_params[0]);
1939 destroy_reg_param(®_params[1]);
1941 target_free_working_area(target, crc_algorithm);
1943 LOG_DEBUG("checksum=0x%" PRIx32 ", result=%d", *checksum, retval);
1948 /*** OpenOCD Helper Functions ***/
/* Result of polling one hart for a state change. */
enum riscv_poll_hart {
	RPH_DISCOVERED_HALTED,   /* hart halted since the last poll */
	RPH_DISCOVERED_RUNNING,  /* hart resumed since the last poll */
/* Poll one hart and report whether its halted/running state changed
 * relative to what OpenOCD last recorded. */
static enum riscv_poll_hart riscv_poll_hart(struct target *target, int hartid)
	if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
	LOG_DEBUG("polling hart %d, target->state=%d", hartid, target->state);
	/* If OpenOCD thinks we're running but this hart is halted then it's time
	 * to raise an event. */
	bool halted = riscv_is_halted(target);
	if (target->state != TARGET_HALTED && halted) {
		LOG_DEBUG("  triggered a halt");
		return RPH_DISCOVERED_HALTED;
	} else if (target->state != TARGET_RUNNING && !halted) {
		LOG_DEBUG("  triggered running");
		target->state = TARGET_RUNNING;
		target->debug_reason = DBG_REASON_NOTHALTED;
		return RPH_DISCOVERED_RUNNING;
	return RPH_NO_CHANGE;
/* Map a RISC-V halt reason onto OpenOCD's generic debug_reason. */
int set_debug_reason(struct target *target, enum riscv_halt_reason halt_reason)
	switch (halt_reason) {
		case RISCV_HALT_BREAKPOINT:
			target->debug_reason = DBG_REASON_BREAKPOINT;
		case RISCV_HALT_TRIGGER:
			target->debug_reason = DBG_REASON_WATCHPOINT;
		case RISCV_HALT_INTERRUPT:
		case RISCV_HALT_GROUP:
			/* Halt requests and halt groups both surface as debug requests. */
			target->debug_reason = DBG_REASON_DBGRQ;
		case RISCV_HALT_SINGLESTEP:
			target->debug_reason = DBG_REASON_SINGLESTEP;
		case RISCV_HALT_UNKNOWN:
			target->debug_reason = DBG_REASON_UNDEFINED;
		case RISCV_HALT_ERROR:
	LOG_DEBUG("[%s] debug_reason=%d", target_name(target), target->debug_reason);
/*** OpenOCD Interface ***/
/* Poll the target: three distinct modes — (1) -rtos riscv: scan every hart
 * and, if one halted, halt all of them; (2) SMP: poll each target in the
 * group and reconcile halt/resume decisions (including semihosting); (3)
 * plain single-hart polling. */
int riscv_openocd_poll(struct target *target)
	LOG_DEBUG("polling all harts");
	int halted_hart = -1;
	if (riscv_rtos_enabled(target)) {
		/* Check every hart for an event. */
		for (int i = 0; i < riscv_count_harts(target); ++i) {
			enum riscv_poll_hart out = riscv_poll_hart(target, i);
				case RPH_DISCOVERED_RUNNING:
				case RPH_DISCOVERED_HALTED:
		if (halted_hart == -1) {
			LOG_DEBUG("  no harts just halted, target->state=%d", target->state);
		LOG_DEBUG("  hart %d halted", halted_hart);
		target->state = TARGET_HALTED;
		enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
		if (set_debug_reason(target, halt_reason) != ERROR_OK)
		/* Point the RTOS view at the hart that halted (threads are 1-based). */
		target->rtos->current_threadid = halted_hart + 1;
		target->rtos->current_thread = halted_hart + 1;
		riscv_set_rtos_hartid(target, halted_hart);
		/* If we're here then at least one hart triggered.  That means we want
		 * to go and halt _every_ hart (configured with -rtos riscv) in the
		 * system, as that's the invariant we hold here.  Some harts might have
		 * already halted (as we're either in single-step mode or they also
		 * triggered a breakpoint), so don't attempt to halt those harts.
		 * riscv_halt() will do all that for us. */
	} else if (target->smp) {
		unsigned halts_discovered = 0;
		unsigned total_targets = 0;
		unsigned should_remain_halted = 0;
		unsigned should_resume = 0;
		for (struct target_list *list = target->head; list != NULL;
				list = list->next, i++) {
			struct target *t = list->target;
			riscv_info_t *r = riscv_info(t);
			enum riscv_poll_hart out = riscv_poll_hart(t, r->current_hartid);
				case RPH_DISCOVERED_RUNNING:
					t->state = TARGET_RUNNING;
					t->debug_reason = DBG_REASON_NOTHALTED;
				case RPH_DISCOVERED_HALTED:
					t->state = TARGET_HALTED;
					enum riscv_halt_reason halt_reason =
						riscv_halt_reason(t, r->current_hartid);
					if (set_debug_reason(t, halt_reason) != ERROR_OK)
					if (halt_reason == RISCV_HALT_BREAKPOINT) {
						/* Semihosting may consume the breakpoint entirely. */
						switch (riscv_semihosting(t, &retval)) {
								/* This hart should remain halted. */
								should_remain_halted++;
								/* This hart should be resumed, along with any other
								 * harts that halted due to haltgroups. */
					} else if (halt_reason != RISCV_HALT_GROUP) {
						should_remain_halted++;
		LOG_DEBUG("should_remain_halted=%d, should_resume=%d",
				should_remain_halted, should_resume);
		/* Conflicting decisions across the group: halting wins below. */
		if (should_remain_halted && should_resume) {
			LOG_WARNING("%d harts should remain halted, and %d should resume.",
					should_remain_halted, should_resume);
		if (should_remain_halted) {
			LOG_DEBUG("halt all");
		} else if (should_resume) {
			LOG_DEBUG("resume all");
			riscv_resume(target, true, 0, 0, 0, false);
	/* Single-hart (non-rtos, non-smp) path. */
	enum riscv_poll_hart out = riscv_poll_hart(target,
			riscv_current_hartid(target));
	if (out == RPH_NO_CHANGE || out == RPH_DISCOVERED_RUNNING)
	else if (out == RPH_ERROR)
	halted_hart = riscv_current_hartid(target);
	LOG_DEBUG("  hart %d halted", halted_hart);
	enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
	if (set_debug_reason(target, halt_reason) != ERROR_OK)
	target->state = TARGET_HALTED;
	if (target->debug_reason == DBG_REASON_BREAKPOINT) {
		switch (riscv_semihosting(target, &retval)) {
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				/* Semihosting handled the breakpoint: just keep running. */
				if (riscv_resume(target, true, 0, 0, 0, false) != ERROR_OK)
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Single-step the RTOS hart: triggers are disabled around the step so a
 * watchpoint at the current PC cannot immediately re-fire. */
int riscv_openocd_step(struct target *target, int current,
		target_addr_t address, int handle_breakpoints)
	LOG_DEBUG("stepping rtos hart");
	/* If not stepping from the current PC, set the requested address. */
	riscv_set_register(target, GDB_REGNO_PC, address);
	riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
	if (disable_triggers(target, trigger_state) != ERROR_OK)
	int out = riscv_step_rtos_hart(target);
	if (out != ERROR_OK) {
		LOG_ERROR("unable to step rtos hart");
	register_cache_invalidate(target->reg_cache);
	if (enable_triggers(target, trigger_state) != ERROR_OK)
	/* Emit a RESUMED/HALTED event pair so GDB sees a completed step. */
	target->state = TARGET_RUNNING;
	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	target->state = TARGET_HALTED;
	target->debug_reason = DBG_REASON_SINGLESTEP;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Command Handlers */
/* riscv set_command_timeout_sec: wall-clock timeout for individual commands. */
COMMAND_HANDLER(riscv_set_command_timeout_sec)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	int timeout = atoi(CMD_ARGV[0]);
	LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
	riscv_command_timeout_sec = timeout;
/* riscv set_reset_timeout_sec: wall-clock timeout after reset deassertion. */
COMMAND_HANDLER(riscv_set_reset_timeout_sec)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	int timeout = atoi(CMD_ARGV[0]);
	LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
	riscv_reset_timeout_sec = timeout;
/* riscv test_compliance: run the debug-spec compliance suite, if the
 * version-specific backend provides one. */
COMMAND_HANDLER(riscv_test_compliance) {
	struct target *target = get_current_target(CMD_CTX);
	LOG_ERROR("Command does not take any parameters.");
	return ERROR_COMMAND_SYNTAX_ERROR;
	if (r->test_compliance) {
		return r->test_compliance(target);
	LOG_ERROR("This target does not support this command (may implement an older version of the spec).");
/* riscv set_prefer_sba: prefer system bus access for memory operations. */
COMMAND_HANDLER(riscv_set_prefer_sba)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_prefer_sba);
/* riscv set_enable_virtual: toggle the riscv_enable_virtual global flag. */
COMMAND_HANDLER(riscv_set_enable_virtual)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virtual);
/* Log a parse error, printing the offending string with a caret ('^')
 * under character `position`. */
void parse_error(const char *string, char c, unsigned position)
	/* VLA sized for `position` pad characters, the caret, and a NUL. */
	char buf[position+2];
	for (unsigned i = 0; i < position; i++)
	buf[position] = '^';
	buf[position + 1] = 0;
	LOG_ERROR("Parse error at character %c in:", c);
	LOG_ERROR("%s", string);
	LOG_ERROR("%s", buf);
/* Parse a comma-separated list of numbers and low-high ranges (e.g.
 * "1,5-7,9") into *ranges. Two passes: pass 0 counts ranges, pass 1 fills
 * the allocated array; the list is terminated by a (low=1, high=0) entry. */
int parse_ranges(range_t **ranges, const char **argv)
	for (unsigned pass = 0; pass < 2; pass++) {
		bool parse_low = true;
		/* Iterate one past the end so the NUL terminator flushes the
		 * final number/range (c == 0 below). */
		for (unsigned i = 0; i == 0 || argv[0][i-1]; i++) {
			char c = argv[0][i];
			/* Ignore whitespace. */
			} else if (c == '-') {
			} else if (c == ',' || c == 0) {
				/* A bare number: a degenerate range with low == high. */
				(*ranges)[range].low = low;
				(*ranges)[range].high = low;
				parse_error(argv[0], c, i);
				return ERROR_COMMAND_SYNTAX_ERROR;
			} else if (c == ',' || c == 0) {
				(*ranges)[range].low = low;
				(*ranges)[range].high = high;
				parse_error(argv[0], c, i);
				return ERROR_COMMAND_SYNTAX_ERROR;
		/* After the counting pass, allocate: ranges + terminator. */
		*ranges = calloc(range + 2, sizeof(range_t));
		/* Terminator entry: an impossible range (low > high). */
		(*ranges)[range].low = 1;
		(*ranges)[range].high = 0;
/* riscv expose_csrs: expose additional CSR ranges to GDB. */
COMMAND_HANDLER(riscv_set_expose_csrs)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	return parse_ranges(&expose_csr, CMD_ARGV);
/* riscv expose_custom: expose custom register ranges to GDB. */
COMMAND_HANDLER(riscv_set_expose_custom)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	return parse_ranges(&expose_custom, CMD_ARGV);
/* riscv authdata_read: print the debug module's authdata register. */
COMMAND_HANDLER(riscv_authdata_read)
	if (CMD_ARGC != 0) {
		LOG_ERROR("Command takes no parameters");
		return ERROR_COMMAND_SYNTAX_ERROR;
	struct target *target = get_current_target(CMD_CTX);
	LOG_ERROR("target is NULL!");
	LOG_ERROR("riscv_info is NULL!");
	/* Only available when the version-specific backend implements it. */
	if (r->authdata_read) {
		if (r->authdata_read(target, &value) != ERROR_OK)
		command_print_sameline(CMD, "0x%08" PRIx32, value);
	LOG_ERROR("authdata_read is not implemented for this target.");
/* riscv authdata_write: write the debug module's authdata register. */
COMMAND_HANDLER(riscv_authdata_write)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes exactly 1 argument");
		return ERROR_COMMAND_SYNTAX_ERROR;
	struct target *target = get_current_target(CMD_CTX);
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], value);
	if (r->authdata_write) {
		return r->authdata_write(target, value);
	LOG_ERROR("authdata_write is not implemented for this target.");
/* riscv dmi_read: read and print a raw debug module interface register. */
COMMAND_HANDLER(riscv_dmi_read)
	if (CMD_ARGC != 1) {
		LOG_ERROR("Command takes 1 parameter");
		return ERROR_COMMAND_SYNTAX_ERROR;
	struct target *target = get_current_target(CMD_CTX);
	LOG_ERROR("target is NULL!");
	LOG_ERROR("riscv_info is NULL!");
	uint32_t address, value;
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
	if (r->dmi_read(target, &value, address) != ERROR_OK)
	command_print(CMD, "0x%" PRIx32, value);
	LOG_ERROR("dmi_read is not implemented for this target.");
/* riscv dmi_write: write a raw debug module interface register. */
COMMAND_HANDLER(riscv_dmi_write)
	if (CMD_ARGC != 2) {
		LOG_ERROR("Command takes exactly 2 arguments");
		return ERROR_COMMAND_SYNTAX_ERROR;
	struct target *target = get_current_target(CMD_CTX);
	uint32_t address, value;
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
	return r->dmi_write(target, address, value);
	LOG_ERROR("dmi_write is not implemented for this target.");
/* riscv test_sba_config_reg: exercise system-bus-access config registers
 * against a legal and an illegal address, optionally testing sbbusyerror. */
COMMAND_HANDLER(riscv_test_sba_config_reg)
	if (CMD_ARGC != 4) {
		LOG_ERROR("Command takes exactly 4 arguments");
		return ERROR_COMMAND_SYNTAX_ERROR;
	struct target *target = get_current_target(CMD_CTX);
	target_addr_t legal_address;
	target_addr_t illegal_address;
	bool run_sbbusyerror_test;
	COMMAND_PARSE_NUMBER(target_addr, CMD_ARGV[0], legal_address);
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], num_words);
	COMMAND_PARSE_NUMBER(target_addr, CMD_ARGV[2], illegal_address);
	COMMAND_PARSE_ON_OFF(CMD_ARGV[3], run_sbbusyerror_test);
	if (r->test_sba_config_reg) {
		return r->test_sba_config_reg(target, legal_address, num_words,
				illegal_address, run_sbbusyerror_test);
	LOG_ERROR("test_sba_config_reg is not implemented for this target.");
/* riscv reset_delays: testing aid — reset learned delay estimates after
 * `wait` operations, forcing the re-learning code paths to run. */
COMMAND_HANDLER(riscv_reset_delays)
	LOG_ERROR("Command takes at most one argument");
	return ERROR_COMMAND_SYNTAX_ERROR;
	COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], wait);
	struct target *target = get_current_target(CMD_CTX);
	r->reset_delays_wait = wait;
/* riscv set_ir: override the JTAG IR value used for idcode/dtmcs/dmi. */
COMMAND_HANDLER(riscv_set_ir)
	if (CMD_ARGC != 2) {
		LOG_ERROR("Command takes exactly 2 arguments");
		return ERROR_COMMAND_SYNTAX_ERROR;
	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
	if (!strcmp(CMD_ARGV[0], "idcode"))
		buf_set_u32(ir_idcode, 0, 32, value);
	else if (!strcmp(CMD_ARGV[0], "dtmcs"))
		buf_set_u32(ir_dtmcontrol, 0, 32, value);
	else if (!strcmp(CMD_ARGV[0], "dmi"))
		buf_set_u32(ir_dbus, 0, 32, value);
/* riscv resume_order: choose the order harts are resumed in ("normal" or
 * "reversed"). */
COMMAND_HANDLER(riscv_resume_order)
	LOG_ERROR("Command takes at most one argument");
	return ERROR_COMMAND_SYNTAX_ERROR;
	if (!strcmp(CMD_ARGV[0], "normal")) {
		resume_order = RO_NORMAL;
	} else if (!strcmp(CMD_ARGV[0], "reversed")) {
		resume_order = RO_REVERSED;
		LOG_ERROR("Unsupported resume order: %s", CMD_ARGV[0]);
/* riscv use_bscan_tunnel: access the DTM through a boundary-scan tunnel.
 * Arg 1 is the tunnel IR width; optional arg 2 selects the tunnel type. */
COMMAND_HANDLER(riscv_use_bscan_tunnel)
	int tunnel_type = BSCAN_TUNNEL_NESTED_TAP;
	LOG_ERROR("Command takes at most two arguments");
	return ERROR_COMMAND_SYNTAX_ERROR;
	} else if (CMD_ARGC == 1) {
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
	} else if (CMD_ARGC == 2) {
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
		COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], tunnel_type);
	if (tunnel_type == BSCAN_TUNNEL_NESTED_TAP)
		LOG_INFO("Nested Tap based Bscan Tunnel Selected");
	else if (tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
		LOG_INFO("Simple Register based Bscan Tunnel Selected");
	LOG_INFO("Invalid Tunnel type selected ! : selecting default Nested Tap Type");
	bscan_tunnel_type = tunnel_type;
	bscan_tunnel_ir_width = irwidth;
2580 COMMAND_HANDLER(riscv_set_enable_virt2phys)
2582 if (CMD_ARGC != 1) {
2583 LOG_ERROR("Command takes exactly 1 parameter");
2584 return ERROR_COMMAND_SYNTAX_ERROR;
2586 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virt2phys);
2590 COMMAND_HANDLER(riscv_set_ebreakm)
2592 if (CMD_ARGC != 1) {
2593 LOG_ERROR("Command takes exactly 1 parameter");
2594 return ERROR_COMMAND_SYNTAX_ERROR;
2596 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreakm);
2600 COMMAND_HANDLER(riscv_set_ebreaks)
2602 if (CMD_ARGC != 1) {
2603 LOG_ERROR("Command takes exactly 1 parameter");
2604 return ERROR_COMMAND_SYNTAX_ERROR;
2606 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaks);
2610 COMMAND_HANDLER(riscv_set_ebreaku)
2612 if (CMD_ARGC != 1) {
2613 LOG_ERROR("Command takes exactly 1 parameter");
2614 return ERROR_COMMAND_SYNTAX_ERROR;
2616 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaku);
/* Subcommands registered under the `riscv` command prefix. Each entry maps a
 * user-visible command name to its COMMAND_HANDLER above. Terminated by
 * COMMAND_REGISTRATION_DONE. */
static const struct command_registration riscv_exec_command_handlers[] = {
	/* Test/diagnostic commands. */
	.name = "test_compliance",
	.handler = riscv_test_compliance,
	.mode = COMMAND_EXEC,
	.help = "Runs a basic compliance test suite against the RISC-V Debug Spec."
	/* Timeout configuration. */
	.name = "set_command_timeout_sec",
	.handler = riscv_set_command_timeout_sec,
	.mode = COMMAND_ANY,
	.help = "Set the wall-clock timeout (in seconds) for individual commands"
	.name = "set_reset_timeout_sec",
	.handler = riscv_set_reset_timeout_sec,
	.mode = COMMAND_ANY,
	.help = "Set the wall-clock timeout (in seconds) after reset is deasserted"
	/* Memory access strategy. */
	.name = "set_prefer_sba",
	.handler = riscv_set_prefer_sba,
	.mode = COMMAND_ANY,
	.help = "When on, prefer to use System Bus Access to access memory. "
		"When off (default), prefer to use the Program Buffer to access memory."
	.name = "set_enable_virtual",
	.handler = riscv_set_enable_virtual,
	.mode = COMMAND_ANY,
	.help = "When on, memory accesses are performed on physical or virtual "
		"memory depending on the current system configuration. "
		"When off (default), all memory accessses are performed on physical memory."
	/* Register exposure — must run before `init`. */
	.name = "expose_csrs",
	.handler = riscv_set_expose_csrs,
	.mode = COMMAND_ANY,
	.usage = "n0[-m0][,n1[-m1]]...",
	.help = "Configure a list of inclusive ranges for CSRs to expose in "
		"addition to the standard ones. This must be executed before "
	.name = "expose_custom",
	.handler = riscv_set_expose_custom,
	.mode = COMMAND_ANY,
	.usage = "n0[-m0][,n1[-m1]]...",
	.help = "Configure a list of inclusive ranges for custom registers to "
		"expose. custom0 is accessed as abstract register number 0xc000, "
		"etc. This must be executed before `init`."
	/* Raw debug-module access. */
	.name = "authdata_read",
	.handler = riscv_authdata_read,
	.mode = COMMAND_ANY,
	.help = "Return the 32-bit value read from authdata."
	.name = "authdata_write",
	.handler = riscv_authdata_write,
	.mode = COMMAND_ANY,
	.help = "Write the 32-bit value to authdata."
	.handler = riscv_dmi_read,
	.mode = COMMAND_ANY,
	.help = "Perform a 32-bit DMI read at address, returning the value."
	.name = "dmi_write",
	.handler = riscv_dmi_write,
	.mode = COMMAND_ANY,
	.usage = "address value",
	.help = "Perform a 32-bit DMI write of value at address."
	.name = "test_sba_config_reg",
	.handler = riscv_test_sba_config_reg,
	.mode = COMMAND_ANY,
	.usage = "legal_address num_words "
		"illegal_address run_sbbusyerror_test[on/off]",
	.help = "Perform a series of tests on the SBCS register. "
		"Inputs are a legal, 128-byte aligned address and a number of words to "
		"read/write starting at that address (i.e., address range [legal address, "
		"legal_address+word_size*num_words) must be legally readable/writable), "
		"an illegal, 128-byte aligned address for error flag/handling cases, "
		"and whether sbbusyerror test should be run."
	.name = "reset_delays",
	.handler = riscv_reset_delays,
	.mode = COMMAND_ANY,
	.help = "OpenOCD learns how many Run-Test/Idle cycles are required "
		"between scans to avoid encountering the target being busy. This "
		"command resets those learned values after `wait` scans. It's only "
		"useful for testing OpenOCD itself."
	.name = "resume_order",
	.handler = riscv_resume_order,
	.mode = COMMAND_ANY,
	.usage = "normal|reversed",
	.help = "Choose the order that harts are resumed in when `hasel` is not "
		"supported. Normal order is from lowest hart index to highest. "
		"Reversed order is from highest hart index to lowest."
	.handler = riscv_set_ir,
	.mode = COMMAND_ANY,
	.usage = "[idcode|dtmcs|dmi] value",
	.help = "Set IR value for specified JTAG register."
	.name = "use_bscan_tunnel",
	.handler = riscv_use_bscan_tunnel,
	.mode = COMMAND_ANY,
	.usage = "value [type]",
	.help = "Enable or disable use of a BSCAN tunnel to reach DM. Supply "
		"the width of the DM transport TAP's instruction register to "
		"enable. Supply a value of 0 to disable. Pass A second argument "
		"(optional) to indicate Bscan Tunnel Type {0:(default) NESTED_TAP , "
	/* dcsr ebreak routing controls. */
	.name = "set_enable_virt2phys",
	.handler = riscv_set_enable_virt2phys,
	.mode = COMMAND_ANY,
	.help = "When on (default), enable translation from virtual address to "
	.name = "set_ebreakm",
	.handler = riscv_set_ebreakm,
	.mode = COMMAND_ANY,
	.help = "Control dcsr.ebreakm. When off, M-mode ebreak instructions "
		"don't trap to OpenOCD. Defaults to on."
	.name = "set_ebreaks",
	.handler = riscv_set_ebreaks,
	.mode = COMMAND_ANY,
	.help = "Control dcsr.ebreaks. When off, S-mode ebreak instructions "
		"don't trap to OpenOCD. Defaults to on."
	.name = "set_ebreaku",
	.handler = riscv_set_ebreaku,
	.mode = COMMAND_ANY,
	.help = "Control dcsr.ebreaku. When off, U-mode ebreak instructions "
		"don't trap to OpenOCD. Defaults to on."
	COMMAND_REGISTRATION_DONE
2791 * To be noted that RISC-V targets use the same semihosting commands as
2794 * The main reason is compatibility with existing tools. For example the
2795 * Eclipse OpenOCD/SEGGER J-Link/QEMU plug-ins have several widgets to
2796 * configure semihosting, which generate commands like `arm semihosting
2798 * A secondary reason is the fact that the protocol used is exactly the
2799 * one specified by ARM. If RISC-V will ever define its own semihosting
2800 * protocol, then a command like `riscv semihosting enable` will make
2801 * sense, but for now all semihosting commands are prefixed with `arm`.
2803 extern const struct command_registration semihosting_common_handlers[];
2805 const struct command_registration riscv_command_handlers[] = {
2808 .mode = COMMAND_ANY,
2809 .help = "RISC-V Command Group",
2811 .chain = riscv_exec_command_handlers
2815 .mode = COMMAND_ANY,
2816 .help = "ARM Command Group",
2818 .chain = semihosting_common_handlers
2820 COMMAND_REGISTRATION_DONE
/* Non-const adapter so riscv_xlen() can be used where target_type expects a
 * callback taking a non-const target (address_bits). */
static unsigned riscv_xlen_nonconst(struct target *target)
{
	return riscv_xlen(target);
}
2828 static unsigned int riscv_data_bits(struct target *target)
2832 return r->data_bits(target);
2833 return riscv_xlen(target);
/* OpenOCD target_type vtable for RISC-V. The old_or_new_* entries dispatch
 * between the 0.11 and 0.13 debug-spec implementations at runtime. */
struct target_type riscv_target = {
	.init_target = riscv_init_target,
	.deinit_target = riscv_deinit_target,
	.examine = riscv_examine,

	/* poll current target status */
	.poll = old_or_new_riscv_poll,

	.resume = riscv_target_resume,
	.step = old_or_new_riscv_step,

	.assert_reset = riscv_assert_reset,
	.deassert_reset = riscv_deassert_reset,

	/* Memory access; phys variants bypass address translation. */
	.read_memory = riscv_read_memory,
	.write_memory = riscv_write_memory,
	.read_phys_memory = riscv_read_phys_memory,
	.write_phys_memory = riscv_write_phys_memory,

	.checksum_memory = riscv_checksum_memory,

	.virt2phys = riscv_virt2phys,

	/* GDB server integration. */
	.get_gdb_arch = riscv_get_gdb_arch,
	.get_gdb_reg_list = riscv_get_gdb_reg_list,
	.get_gdb_reg_list_noread = riscv_get_gdb_reg_list_noread,

	.add_breakpoint = riscv_add_breakpoint,
	.remove_breakpoint = riscv_remove_breakpoint,

	.add_watchpoint = riscv_add_watchpoint,
	.remove_watchpoint = riscv_remove_watchpoint,
	.hit_watchpoint = riscv_hit_watchpoint,

	.arch_state = riscv_arch_state,

	.run_algorithm = riscv_run_algorithm,

	.commands = riscv_command_handlers,

	.address_bits = riscv_xlen_nonconst,
	.data_bits = riscv_data_bits
2884 /*** RISC-V Interface ***/
2886 void riscv_info_init(struct target *target, riscv_info_t *r)
2888 memset(r, 0, sizeof(*r));
2890 r->registers_initialized = false;
2891 r->current_hartid = target->coreid;
2893 memset(r->trigger_unique_id, 0xff, sizeof(r->trigger_unique_id));
2895 for (size_t h = 0; h < RISCV_MAX_HARTS; ++h)
2899 static int riscv_resume_go_all_harts(struct target *target)
2903 /* Dummy variables to make mingw32-gcc happy. */
2907 switch (resume_order) {
2910 last = riscv_count_harts(target) - 1;
2914 first = riscv_count_harts(target) - 1;
2922 for (int i = first; i != last + step; i += step) {
2923 if (!riscv_hart_enabled(target, i))
2926 LOG_DEBUG("resuming hart %d", i);
2927 if (riscv_set_current_hartid(target, i) != ERROR_OK)
2929 if (riscv_is_halted(target)) {
2930 if (r->resume_go(target) != ERROR_OK)
2933 LOG_DEBUG(" hart %d requested resume, but was already resumed", i);
2937 riscv_invalidate_register_cache(target);
2941 int riscv_step_rtos_hart(struct target *target)
2944 int hartid = r->current_hartid;
2945 if (riscv_rtos_enabled(target)) {
2946 hartid = r->rtos_hartid;
2948 LOG_DEBUG("GDB has asked me to step \"any\" thread, so I'm stepping hart 0.");
2952 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
2954 LOG_DEBUG("stepping hart %d", hartid);
2956 if (!riscv_is_halted(target)) {
2957 LOG_ERROR("Hart isn't halted before single step!");
2960 riscv_invalidate_register_cache(target);
2962 if (r->step_current_hart(target) != ERROR_OK)
2964 riscv_invalidate_register_cache(target);
2966 if (!riscv_is_halted(target)) {
2967 LOG_ERROR("Hart was not halted after single step!");
2973 bool riscv_supports_extension(struct target *target, int hartid, char letter)
2977 if (letter >= 'a' && letter <= 'z')
2979 else if (letter >= 'A' && letter <= 'Z')
2983 return r->misa[hartid] & (1 << num);
/* XLEN (register width in bits) of the currently selected hart. */
unsigned riscv_xlen(const struct target *target)
{
	return riscv_xlen_of_hart(target, riscv_current_hartid(target));
}
2991 int riscv_xlen_of_hart(const struct target *target, int hartid)
2994 assert(r->xlen[hartid] != -1);
2995 return r->xlen[hartid];
2998 bool riscv_rtos_enabled(const struct target *target)
3003 int riscv_set_current_hartid(struct target *target, int hartid)
3006 if (!r->select_current_hart)
3009 int previous_hartid = riscv_current_hartid(target);
3010 r->current_hartid = hartid;
3011 assert(riscv_hart_enabled(target, hartid));
3012 LOG_DEBUG("setting hartid to %d, was %d", hartid, previous_hartid);
3013 if (r->select_current_hart(target) != ERROR_OK)
3016 /* This might get called during init, in which case we shouldn't be
3017 * setting up the register cache. */
3018 if (target_was_examined(target) && riscv_rtos_enabled(target))
3019 riscv_invalidate_register_cache(target);
3024 void riscv_invalidate_register_cache(struct target *target)
3028 LOG_DEBUG("[%d]", target->coreid);
3029 register_cache_invalidate(target->reg_cache);
3030 for (size_t i = 0; i < target->reg_cache->num_regs; ++i) {
3031 struct reg *reg = &target->reg_cache->reg_list[i];
3035 r->registers_initialized = true;
3038 int riscv_current_hartid(const struct target *target)
3041 return r->current_hartid;
3044 void riscv_set_all_rtos_harts(struct target *target)
3047 r->rtos_hartid = -1;
3050 void riscv_set_rtos_hartid(struct target *target, int hartid)
3052 LOG_DEBUG("setting RTOS hartid %d", hartid);
3054 r->rtos_hartid = hartid;
3057 int riscv_count_harts(struct target *target)
3062 if (r == NULL || r->hart_count == NULL)
3064 return r->hart_count(target);
3067 bool riscv_has_register(struct target *target, int hartid, int regid)
3074 * return true iff we are guaranteed that the register will contain exactly
3075 * the value we just wrote when it's read.
3076 * If write is false:
3077 * return true iff we are guaranteed that the register will read the same
3078 * value in the future as the value we just read.
3080 static bool gdb_regno_cacheable(enum gdb_regno regno, bool write)
3082 /* GPRs, FPRs, vector registers are just normal data stores. */
3083 if (regno <= GDB_REGNO_XPR31 ||
3084 (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31) ||
3085 (regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31))
3088 /* Most CSRs won't change value on us, but we can't assume it about rbitrary
3094 case GDB_REGNO_VSTART:
3095 case GDB_REGNO_VXSAT:
3096 case GDB_REGNO_VXRM:
3097 case GDB_REGNO_VLENB:
3099 case GDB_REGNO_VTYPE:
3100 case GDB_REGNO_MISA:
3101 case GDB_REGNO_DCSR:
3102 case GDB_REGNO_DSCRATCH0:
3103 case GDB_REGNO_MSTATUS:
3104 case GDB_REGNO_MEPC:
3105 case GDB_REGNO_MCAUSE:
3106 case GDB_REGNO_SATP:
3108 * WARL registers might not contain the value we just wrote, but
3109 * these ones won't spontaneously change their value either. *
3113 case GDB_REGNO_TSELECT: /* I think this should be above, but then it doesn't work. */
3114 case GDB_REGNO_TDATA1: /* Changes value when tselect is changed. */
3115 case GDB_REGNO_TDATA2: /* Changse value when tselect is changed. */
3122 * This function is called when the debug user wants to change the value of a
3123 * register. The new value may be cached, and may not be written until the hart
3125 int riscv_set_register(struct target *target, enum gdb_regno r, riscv_reg_t v)
3127 return riscv_set_register_on_hart(target, riscv_current_hartid(target), r, v);
3130 int riscv_set_register_on_hart(struct target *target, int hartid,
3131 enum gdb_regno regid, uint64_t value)
3134 LOG_DEBUG("{%d} %s <- %" PRIx64, hartid, gdb_regno_name(regid), value);
3135 assert(r->set_register);
3137 /* TODO: Hack to deal with gdb that thinks these registers still exist. */
3138 if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 && value == 0 &&
3139 riscv_supports_extension(target, hartid, 'E'))
3142 struct reg *reg = &target->reg_cache->reg_list[regid];
3143 buf_set_u64(reg->value, 0, reg->size, value);
3145 int result = r->set_register(target, hartid, regid, value);
3146 if (result == ERROR_OK)
3147 reg->valid = gdb_regno_cacheable(regid, true);
3150 LOG_DEBUG("[%s]{%d} wrote 0x%" PRIx64 " to %s valid=%d",
3151 target_name(target), hartid, value, reg->name, reg->valid);
3155 int riscv_get_register(struct target *target, riscv_reg_t *value,
3158 return riscv_get_register_on_hart(target, value,
3159 riscv_current_hartid(target), r);
3162 int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
3163 int hartid, enum gdb_regno regid)
3167 struct reg *reg = &target->reg_cache->reg_list[regid];
3169 LOG_DEBUG("[%s]{%d} %s does not exist.",
3170 target_name(target), hartid, gdb_regno_name(regid));
3174 if (reg && reg->valid && hartid == riscv_current_hartid(target)) {
3175 *value = buf_get_u64(reg->value, 0, reg->size);
3176 LOG_DEBUG("{%d} %s: %" PRIx64 " (cached)", hartid,
3177 gdb_regno_name(regid), *value);
3181 /* TODO: Hack to deal with gdb that thinks these registers still exist. */
3182 if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 &&
3183 riscv_supports_extension(target, hartid, 'E')) {
3188 int result = r->get_register(target, value, hartid, regid);
3190 if (result == ERROR_OK)
3191 reg->valid = gdb_regno_cacheable(regid, false);
3193 LOG_DEBUG("{%d} %s: %" PRIx64, hartid, gdb_regno_name(regid), *value);
3197 bool riscv_is_halted(struct target *target)
3200 assert(r->is_halted);
3201 return r->is_halted(target);
3204 enum riscv_halt_reason riscv_halt_reason(struct target *target, int hartid)
3207 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
3208 return RISCV_HALT_ERROR;
3209 if (!riscv_is_halted(target)) {
3210 LOG_ERROR("Hart is not halted!");
3211 return RISCV_HALT_UNKNOWN;
3213 return r->halt_reason(target);
3216 size_t riscv_debug_buffer_size(struct target *target)
3219 return r->debug_buffer_size[riscv_current_hartid(target)];
3222 int riscv_write_debug_buffer(struct target *target, int index, riscv_insn_t insn)
3225 r->write_debug_buffer(target, index, insn);
3229 riscv_insn_t riscv_read_debug_buffer(struct target *target, int index)
3232 return r->read_debug_buffer(target, index);
3235 int riscv_execute_debug_buffer(struct target *target)
3238 return r->execute_debug_buffer(target);
3241 void riscv_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
3244 r->fill_dmi_write_u64(target, buf, a, d);
3247 void riscv_fill_dmi_read_u64(struct target *target, char *buf, int a)
3250 r->fill_dmi_read_u64(target, buf, a);
3253 void riscv_fill_dmi_nop_u64(struct target *target, char *buf)
3256 r->fill_dmi_nop_u64(target, buf);
3259 int riscv_dmi_write_u64_bits(struct target *target)
3262 return r->dmi_write_u64_bits(target);
3265 bool riscv_hart_enabled(struct target *target, int hartid)
3267 /* FIXME: Add a hart mask to the RTOS. */
3268 if (riscv_rtos_enabled(target))
3269 return hartid < riscv_count_harts(target);
3271 return hartid == target->coreid;
3275 * Count triggers, and initialize trigger_count for each hart.
3276 * trigger_count is initialized even if this function fails to discover
3278 * Disable any hardware triggers that have dmode set. We can't have set them
3279 * ourselves. Maybe they're left over from some killed debug session.
/* Count the hardware triggers on every hart and record the result in
 * trigger_count[]. Probes by writing increasing values to tselect and
 * checking the read-back; also clears any trigger left configured with
 * dmode set by a previous (killed) debug session. Idempotent: a flag keeps
 * it from probing twice. */
int riscv_enumerate_triggers(struct target *target)
	/* Already done (or attempted) — don't probe the hardware again. */
	if (r->triggers_enumerated)
	r->triggers_enumerated = true; /* At the very least we tried. */
	for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
		if (!riscv_hart_enabled(target, hartid))
		/* Save tselect so it can be restored after probing. */
		riscv_reg_t tselect;
		int result = riscv_get_register_on_hart(target, &tselect, hartid,
		if (result != ERROR_OK)
		for (unsigned t = 0; t < RISCV_MAX_TRIGGERS; ++t) {
			r->trigger_count[hartid] = t;
			/* If we can't write tselect, then this hart does not support triggers. */
			if (riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, t) != ERROR_OK)
			uint64_t tselect_rb;
			result = riscv_get_register_on_hart(target, &tselect_rb, hartid,
			if (result != ERROR_OK)
			/* Mask off the top bit, which is used as tdrmode in old
			 * implementations. */
			tselect_rb &= ~(1ULL << (riscv_xlen(target)-1));
			/* Read-back mismatch means trigger t doesn't exist — stop. */
			if (tselect_rb != t)
			result = riscv_get_register_on_hart(target, &tdata1, hartid,
			if (result != ERROR_OK)
			int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
			/* On these older cores we don't support software using
			riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
			/* Clear leftover debug-mode triggers from a dead session. */
			if (tdata1 & MCONTROL_DMODE(riscv_xlen(target)))
				riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
		/* Restore the tselect value saved before probing. */
		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
		LOG_INFO("[%d] Found %d triggers", hartid, r->trigger_count[hartid]);
/* Human-readable name for a GDB register number.
 * NOTE: falls back to formatting into a static buffer, so the returned
 * pointer may be overwritten by the next call — not reentrant. */
const char *gdb_regno_name(enum gdb_regno regno)
	static char buf[32];
	case GDB_REGNO_ZERO:
	case GDB_REGNO_FPR0:
	case GDB_REGNO_FPR31:
	case GDB_REGNO_CSR0:
	case GDB_REGNO_TSELECT:
	case GDB_REGNO_TDATA1:
	case GDB_REGNO_TDATA2:
	case GDB_REGNO_MISA:
	case GDB_REGNO_DCSR:
	case GDB_REGNO_DSCRATCH0:
	case GDB_REGNO_MSTATUS:
	case GDB_REGNO_MEPC:
	case GDB_REGNO_MCAUSE:
	case GDB_REGNO_PRIV:
	case GDB_REGNO_SATP:
	case GDB_REGNO_VTYPE:
	/* No dedicated case matched: synthesize a name by register class. */
	if (regno <= GDB_REGNO_XPR31)
		sprintf(buf, "x%d", regno - GDB_REGNO_ZERO);
	else if (regno >= GDB_REGNO_CSR0 && regno <= GDB_REGNO_CSR4095)
		sprintf(buf, "csr%d", regno - GDB_REGNO_CSR0);
	else if (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31)
		sprintf(buf, "f%d", regno - GDB_REGNO_FPR0);
	/* Last resort: raw numeric name. */
	sprintf(buf, "gdb_regno_%d", regno);
3528 static int register_get(struct reg *reg)
3530 riscv_reg_info_t *reg_info = reg->arch_info;
3531 struct target *target = reg_info->target;
3534 if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
3535 if (!r->get_register_buf) {
3536 LOG_ERROR("Reading register %s not supported on this RISC-V target.",
3537 gdb_regno_name(reg->number));
3541 if (r->get_register_buf(target, reg->value, reg->number) != ERROR_OK)
3545 int result = riscv_get_register(target, &value, reg->number);
3546 if (result != ERROR_OK)
3548 buf_set_u64(reg->value, 0, reg->size, value);
3550 reg->valid = gdb_regno_cacheable(reg->number, false);
3551 char *str = buf_to_hex_str(reg->value, reg->size);
3552 LOG_DEBUG("[%d]{%d} read 0x%s from %s (valid=%d)", target->coreid,
3553 riscv_current_hartid(target), str, reg->name, reg->valid);
3558 static int register_set(struct reg *reg, uint8_t *buf)
3560 riscv_reg_info_t *reg_info = reg->arch_info;
3561 struct target *target = reg_info->target;
3564 char *str = buf_to_hex_str(buf, reg->size);
3565 LOG_DEBUG("[%d]{%d} write 0x%s to %s (valid=%d)", target->coreid,
3566 riscv_current_hartid(target), str, reg->name, reg->valid);
3569 memcpy(reg->value, buf, DIV_ROUND_UP(reg->size, 8));
3570 reg->valid = gdb_regno_cacheable(reg->number, true);
3572 if (reg->number == GDB_REGNO_TDATA1 ||
3573 reg->number == GDB_REGNO_TDATA2) {
3574 r->manual_hwbp_set = true;
3575 /* When enumerating triggers, we clear any triggers with DMODE set,
3576 * assuming they were left over from a previous debug session. So make
3577 * sure that is done before a user might be setting their own triggers.
3579 if (riscv_enumerate_triggers(target) != ERROR_OK)
3583 if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
3584 if (!r->set_register_buf) {
3585 LOG_ERROR("Writing register %s not supported on this RISC-V target.",
3586 gdb_regno_name(reg->number));
3590 if (r->set_register_buf(target, reg->number, reg->value) != ERROR_OK)
3593 uint64_t value = buf_get_u64(buf, 0, reg->size);
3594 if (riscv_set_register(target, reg->number, value) != ERROR_OK)
3601 static struct reg_arch_type riscv_reg_arch_type = {
3602 .get = register_get,
3611 static int cmp_csr_info(const void *p1, const void *p2)
3613 return (int) (((struct csr_info *)p1)->number) - (int) (((struct csr_info *)p2)->number);
3616 int riscv_init_registers(struct target *target)
3620 riscv_free_registers(target);
3622 target->reg_cache = calloc(1, sizeof(*target->reg_cache));
3623 if (!target->reg_cache)
3625 target->reg_cache->name = "RISC-V Registers";
3626 target->reg_cache->num_regs = GDB_REGNO_COUNT;
3628 if (expose_custom) {
3629 for (unsigned i = 0; expose_custom[i].low <= expose_custom[i].high; i++) {
3630 for (unsigned number = expose_custom[i].low;
3631 number <= expose_custom[i].high;
3633 target->reg_cache->num_regs++;
3637 LOG_DEBUG("create register cache for %d registers",
3638 target->reg_cache->num_regs);
3640 target->reg_cache->reg_list =
3641 calloc(target->reg_cache->num_regs, sizeof(struct reg));
3642 if (!target->reg_cache->reg_list)
3645 const unsigned int max_reg_name_len = 12;
3646 free(info->reg_names);
3648 calloc(target->reg_cache->num_regs, max_reg_name_len);
3649 if (!info->reg_names)
3651 char *reg_name = info->reg_names;
3653 int hartid = riscv_current_hartid(target);
3655 static struct reg_feature feature_cpu = {
3656 .name = "org.gnu.gdb.riscv.cpu"
3658 static struct reg_feature feature_fpu = {
3659 .name = "org.gnu.gdb.riscv.fpu"
3661 static struct reg_feature feature_csr = {
3662 .name = "org.gnu.gdb.riscv.csr"
3664 static struct reg_feature feature_vector = {
3665 .name = "org.gnu.gdb.riscv.vector"
3667 static struct reg_feature feature_virtual = {
3668 .name = "org.gnu.gdb.riscv.virtual"
3670 static struct reg_feature feature_custom = {
3671 .name = "org.gnu.gdb.riscv.custom"
3674 /* These types are built into gdb. */
3675 static struct reg_data_type type_ieee_single = { .type = REG_TYPE_IEEE_SINGLE, .id = "ieee_single" };
3676 static struct reg_data_type type_ieee_double = { .type = REG_TYPE_IEEE_DOUBLE, .id = "ieee_double" };
3677 static struct reg_data_type_union_field single_double_fields[] = {
3678 {"float", &type_ieee_single, single_double_fields + 1},
3679 {"double", &type_ieee_double, NULL},
3681 static struct reg_data_type_union single_double_union = {
3682 .fields = single_double_fields
3684 static struct reg_data_type type_ieee_single_double = {
3685 .type = REG_TYPE_ARCH_DEFINED,
3687 .type_class = REG_TYPE_CLASS_UNION,
3688 .reg_type_union = &single_double_union
3690 static struct reg_data_type type_uint8 = { .type = REG_TYPE_UINT8, .id = "uint8" };
3691 static struct reg_data_type type_uint16 = { .type = REG_TYPE_UINT16, .id = "uint16" };
3692 static struct reg_data_type type_uint32 = { .type = REG_TYPE_UINT32, .id = "uint32" };
3693 static struct reg_data_type type_uint64 = { .type = REG_TYPE_UINT64, .id = "uint64" };
3694 static struct reg_data_type type_uint128 = { .type = REG_TYPE_UINT128, .id = "uint128" };
3696 /* This is roughly the XML we want:
3697 * <vector id="bytes" type="uint8" count="16"/>
3698 * <vector id="shorts" type="uint16" count="8"/>
3699 * <vector id="words" type="uint32" count="4"/>
3700 * <vector id="longs" type="uint64" count="2"/>
3701 * <vector id="quads" type="uint128" count="1"/>
3702 * <union id="riscv_vector_type">
3703 * <field name="b" type="bytes"/>
3704 * <field name="s" type="shorts"/>
3705 * <field name="w" type="words"/>
3706 * <field name="l" type="longs"/>
3707 * <field name="q" type="quads"/>
3711 info->vector_uint8.type = &type_uint8;
3712 info->vector_uint8.count = info->vlenb[hartid];
3713 info->type_uint8_vector.type = REG_TYPE_ARCH_DEFINED;
3714 info->type_uint8_vector.id = "bytes";
3715 info->type_uint8_vector.type_class = REG_TYPE_CLASS_VECTOR;
3716 info->type_uint8_vector.reg_type_vector = &info->vector_uint8;
3718 info->vector_uint16.type = &type_uint16;
3719 info->vector_uint16.count = info->vlenb[hartid] / 2;
3720 info->type_uint16_vector.type = REG_TYPE_ARCH_DEFINED;
3721 info->type_uint16_vector.id = "shorts";
3722 info->type_uint16_vector.type_class = REG_TYPE_CLASS_VECTOR;
3723 info->type_uint16_vector.reg_type_vector = &info->vector_uint16;
3725 info->vector_uint32.type = &type_uint32;
3726 info->vector_uint32.count = info->vlenb[hartid] / 4;
3727 info->type_uint32_vector.type = REG_TYPE_ARCH_DEFINED;
3728 info->type_uint32_vector.id = "words";
3729 info->type_uint32_vector.type_class = REG_TYPE_CLASS_VECTOR;
3730 info->type_uint32_vector.reg_type_vector = &info->vector_uint32;
3732 info->vector_uint64.type = &type_uint64;
3733 info->vector_uint64.count = info->vlenb[hartid] / 8;
3734 info->type_uint64_vector.type = REG_TYPE_ARCH_DEFINED;
3735 info->type_uint64_vector.id = "longs";
3736 info->type_uint64_vector.type_class = REG_TYPE_CLASS_VECTOR;
3737 info->type_uint64_vector.reg_type_vector = &info->vector_uint64;
3739 info->vector_uint128.type = &type_uint128;
3740 info->vector_uint128.count = info->vlenb[hartid] / 16;
3741 info->type_uint128_vector.type = REG_TYPE_ARCH_DEFINED;
3742 info->type_uint128_vector.id = "quads";
3743 info->type_uint128_vector.type_class = REG_TYPE_CLASS_VECTOR;
3744 info->type_uint128_vector.reg_type_vector = &info->vector_uint128;
3746 info->vector_fields[0].name = "b";
3747 info->vector_fields[0].type = &info->type_uint8_vector;
3748 if (info->vlenb[hartid] >= 2) {
3749 info->vector_fields[0].next = info->vector_fields + 1;
3750 info->vector_fields[1].name = "s";
3751 info->vector_fields[1].type = &info->type_uint16_vector;
3753 info->vector_fields[0].next = NULL;
3755 if (info->vlenb[hartid] >= 4) {
3756 info->vector_fields[1].next = info->vector_fields + 2;
3757 info->vector_fields[2].name = "w";
3758 info->vector_fields[2].type = &info->type_uint32_vector;
3760 info->vector_fields[1].next = NULL;
3762 if (info->vlenb[hartid] >= 8) {
3763 info->vector_fields[2].next = info->vector_fields + 3;
3764 info->vector_fields[3].name = "l";
3765 info->vector_fields[3].type = &info->type_uint64_vector;
3767 info->vector_fields[2].next = NULL;
3769 if (info->vlenb[hartid] >= 16) {
3770 info->vector_fields[3].next = info->vector_fields + 4;
3771 info->vector_fields[4].name = "q";
3772 info->vector_fields[4].type = &info->type_uint128_vector;
3774 info->vector_fields[3].next = NULL;
3776 info->vector_fields[4].next = NULL;
3778 info->vector_union.fields = info->vector_fields;
3780 info->type_vector.type = REG_TYPE_ARCH_DEFINED;
3781 info->type_vector.id = "riscv_vector";
3782 info->type_vector.type_class = REG_TYPE_CLASS_UNION;
3783 info->type_vector.reg_type_union = &info->vector_union;
3785 struct csr_info csr_info[] = {
3786 #define DECLARE_CSR(name, number) { number, #name },
3787 #include "encoding.h"
3790 /* encoding.h does not contain the registers in sorted order. */
3791 qsort(csr_info, ARRAY_SIZE(csr_info), sizeof(*csr_info), cmp_csr_info);
3792 unsigned csr_info_index = 0;
3794 unsigned custom_range_index = 0;
3795 int custom_within_range = 0;
3797 riscv_reg_info_t *shared_reg_info = calloc(1, sizeof(riscv_reg_info_t));
3798 if (!shared_reg_info)
3800 shared_reg_info->target = target;
3802 /* When gdb requests register N, gdb_get_register_packet() assumes that this
3803 * is register at index N in reg_list. So if there are certain registers
3804 * that don't exist, we need to leave holes in the list (or renumber, but
3805 * it would be nice not to have yet another set of numbers to translate
3807 for (uint32_t number = 0; number < target->reg_cache->num_regs; number++) {
3808 struct reg *r = &target->reg_cache->reg_list[number];
3812 r->type = &riscv_reg_arch_type;
3813 r->arch_info = shared_reg_info;
3815 r->size = riscv_xlen(target);
3816 /* r->size is set in riscv_invalidate_register_cache, maybe because the
3817 * target is in theory allowed to change XLEN on us. But I expect a lot
3818 * of other things to break in that case as well. */
3819 if (number <= GDB_REGNO_XPR31) {
3820 r->exist = number <= GDB_REGNO_XPR15 ||
3821 !riscv_supports_extension(target, hartid, 'E');
3822 /* TODO: For now we fake that all GPRs exist because otherwise gdb
3825 r->caller_save = true;
3827 case GDB_REGNO_ZERO:
3924 r->group = "general";
3925 r->feature = &feature_cpu;
3926 } else if (number == GDB_REGNO_PC) {
3927 r->caller_save = true;
3928 sprintf(reg_name, "pc");
3929 r->group = "general";
3930 r->feature = &feature_cpu;
3931 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
3932 r->caller_save = true;
3933 if (riscv_supports_extension(target, hartid, 'D')) {
3935 if (riscv_supports_extension(target, hartid, 'F'))
3936 r->reg_data_type = &type_ieee_single_double;
3938 r->reg_data_type = &type_ieee_double;
3939 } else if (riscv_supports_extension(target, hartid, 'F')) {
3940 r->reg_data_type = &type_ieee_single;
4024 case GDB_REGNO_FS10:
4027 case GDB_REGNO_FS11:
4036 case GDB_REGNO_FT10:
4039 case GDB_REGNO_FT11:
4044 r->feature = &feature_fpu;
4045 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
4047 r->feature = &feature_csr;
4048 unsigned csr_number = number - GDB_REGNO_CSR0;
4050 while (csr_info[csr_info_index].number < csr_number &&
4051 csr_info_index < ARRAY_SIZE(csr_info) - 1) {
4054 if (csr_info[csr_info_index].number == csr_number) {
4055 r->name = csr_info[csr_info_index].name;
4057 sprintf(reg_name, "csr%d", csr_number);
4058 /* Assume unnamed registers don't exist, unless we have some
4059 * configuration that tells us otherwise. That's important
4060 * because eg. Eclipse crashes if a target has too many
4061 * registers, and apparently has no way of only showing a
4062 * subset of registers in any case. */
4066 switch (csr_number) {
4070 r->exist = riscv_supports_extension(target, hartid, 'F');
4072 r->feature = &feature_fpu;
4078 case CSR_SCOUNTEREN:
4084 r->exist = riscv_supports_extension(target, hartid, 'S');
4088 /* "In systems with only M-mode, or with both M-mode and
4089 * U-mode but without U-mode trap support, the medeleg and
4090 * mideleg registers should not exist." */
4091 r->exist = riscv_supports_extension(target, hartid, 'S') ||
4092 riscv_supports_extension(target, hartid, 'N');
4100 case CSR_HPMCOUNTER3H:
4101 case CSR_HPMCOUNTER4H:
4102 case CSR_HPMCOUNTER5H:
4103 case CSR_HPMCOUNTER6H:
4104 case CSR_HPMCOUNTER7H:
4105 case CSR_HPMCOUNTER8H:
4106 case CSR_HPMCOUNTER9H:
4107 case CSR_HPMCOUNTER10H:
4108 case CSR_HPMCOUNTER11H:
4109 case CSR_HPMCOUNTER12H:
4110 case CSR_HPMCOUNTER13H:
4111 case CSR_HPMCOUNTER14H:
4112 case CSR_HPMCOUNTER15H:
4113 case CSR_HPMCOUNTER16H:
4114 case CSR_HPMCOUNTER17H:
4115 case CSR_HPMCOUNTER18H:
4116 case CSR_HPMCOUNTER19H:
4117 case CSR_HPMCOUNTER20H:
4118 case CSR_HPMCOUNTER21H:
4119 case CSR_HPMCOUNTER22H:
4120 case CSR_HPMCOUNTER23H:
4121 case CSR_HPMCOUNTER24H:
4122 case CSR_HPMCOUNTER25H:
4123 case CSR_HPMCOUNTER26H:
4124 case CSR_HPMCOUNTER27H:
4125 case CSR_HPMCOUNTER28H:
4126 case CSR_HPMCOUNTER29H:
4127 case CSR_HPMCOUNTER30H:
4128 case CSR_HPMCOUNTER31H:
4131 case CSR_MHPMCOUNTER3H:
4132 case CSR_MHPMCOUNTER4H:
4133 case CSR_MHPMCOUNTER5H:
4134 case CSR_MHPMCOUNTER6H:
4135 case CSR_MHPMCOUNTER7H:
4136 case CSR_MHPMCOUNTER8H:
4137 case CSR_MHPMCOUNTER9H:
4138 case CSR_MHPMCOUNTER10H:
4139 case CSR_MHPMCOUNTER11H:
4140 case CSR_MHPMCOUNTER12H:
4141 case CSR_MHPMCOUNTER13H:
4142 case CSR_MHPMCOUNTER14H:
4143 case CSR_MHPMCOUNTER15H:
4144 case CSR_MHPMCOUNTER16H:
4145 case CSR_MHPMCOUNTER17H:
4146 case CSR_MHPMCOUNTER18H:
4147 case CSR_MHPMCOUNTER19H:
4148 case CSR_MHPMCOUNTER20H:
4149 case CSR_MHPMCOUNTER21H:
4150 case CSR_MHPMCOUNTER22H:
4151 case CSR_MHPMCOUNTER23H:
4152 case CSR_MHPMCOUNTER24H:
4153 case CSR_MHPMCOUNTER25H:
4154 case CSR_MHPMCOUNTER26H:
4155 case CSR_MHPMCOUNTER27H:
4156 case CSR_MHPMCOUNTER28H:
4157 case CSR_MHPMCOUNTER29H:
4158 case CSR_MHPMCOUNTER30H:
4159 case CSR_MHPMCOUNTER31H:
4160 r->exist = riscv_xlen(target) == 32;
4169 r->exist = riscv_supports_extension(target, hartid, 'V');
4173 if (!r->exist && expose_csr) {
4174 for (unsigned i = 0; expose_csr[i].low <= expose_csr[i].high; i++) {
4175 if (csr_number >= expose_csr[i].low && csr_number <= expose_csr[i].high) {
4176 LOG_INFO("Exposing additional CSR %d", csr_number);
4183 } else if (number == GDB_REGNO_PRIV) {
4184 sprintf(reg_name, "priv");
4185 r->group = "general";
4186 r->feature = &feature_virtual;
4189 } else if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31) {
4190 r->caller_save = false;
4191 r->exist = riscv_supports_extension(target, hartid, 'V') && info->vlenb[hartid];
4192 r->size = info->vlenb[hartid] * 8;
4193 sprintf(reg_name, "v%d", number - GDB_REGNO_V0);
4194 r->group = "vector";
4195 r->feature = &feature_vector;
4196 r->reg_data_type = &info->type_vector;
4198 } else if (number >= GDB_REGNO_COUNT) {
4199 /* Custom registers. */
4200 assert(expose_custom);
4202 range_t *range = &expose_custom[custom_range_index];
4203 assert(range->low <= range->high);
4204 unsigned custom_number = range->low + custom_within_range;
4206 r->group = "custom";
4207 r->feature = &feature_custom;
4208 r->arch_info = calloc(1, sizeof(riscv_reg_info_t));
4211 ((riscv_reg_info_t *) r->arch_info)->target = target;
4212 ((riscv_reg_info_t *) r->arch_info)->custom_number = custom_number;
4213 sprintf(reg_name, "custom%d", custom_number);
4215 custom_within_range++;
4216 if (custom_within_range > range->high - range->low) {
4217 custom_within_range = 0;
4218 custom_range_index++;
4224 reg_name += strlen(reg_name) + 1;
4225 assert(reg_name < info->reg_names + target->reg_cache->num_regs *
4227 r->value = info->reg_cache_values[number];
4234 void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
4235 riscv_bscan_tunneled_scan_context_t *ctxt)
4237 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
4239 memset(ctxt->tunneled_dr, 0, sizeof(ctxt->tunneled_dr));
4240 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
4241 ctxt->tunneled_dr[3].num_bits = 1;
4242 ctxt->tunneled_dr[3].out_value = bscan_one;
4243 ctxt->tunneled_dr[2].num_bits = 7;
4244 ctxt->tunneled_dr_width = field->num_bits;
4245 ctxt->tunneled_dr[2].out_value = &ctxt->tunneled_dr_width;
4246 /* for BSCAN tunnel, there is a one-TCK skew between shift in and shift out, so
4247 scanning num_bits + 1, and then will right shift the input field after executing the queues */
4249 ctxt->tunneled_dr[1].num_bits = field->num_bits + 1;
4250 ctxt->tunneled_dr[1].out_value = field->out_value;
4251 ctxt->tunneled_dr[1].in_value = field->in_value;
4253 ctxt->tunneled_dr[0].num_bits = 3;
4254 ctxt->tunneled_dr[0].out_value = bscan_zero;
4256 /* BSCAN_TUNNEL_NESTED_TAP */
4257 ctxt->tunneled_dr[0].num_bits = 1;
4258 ctxt->tunneled_dr[0].out_value = bscan_one;
4259 ctxt->tunneled_dr[1].num_bits = 7;
4260 ctxt->tunneled_dr_width = field->num_bits;
4261 ctxt->tunneled_dr[1].out_value = &ctxt->tunneled_dr_width;
4262 /* for BSCAN tunnel, there is a one-TCK skew between shift in and shift out, so
4263 scanning num_bits + 1, and then will right shift the input field after executing the queues */
4264 ctxt->tunneled_dr[2].num_bits = field->num_bits + 1;
4265 ctxt->tunneled_dr[2].out_value = field->out_value;
4266 ctxt->tunneled_dr[2].in_value = field->in_value;
4267 ctxt->tunneled_dr[3].num_bits = 3;
4268 ctxt->tunneled_dr[3].out_value = bscan_zero;
4270 jtag_add_dr_scan(target->tap, ARRAY_SIZE(ctxt->tunneled_dr), ctxt->tunneled_dr, TAP_IDLE);