1 /* SPDX-License-Identifier: GPL-2.0-or-later */
11 #include "target/target.h"
12 #include "target/algorithm.h"
13 #include "target/target_type.h"
15 #include "jtag/jtag.h"
16 #include "target/register.h"
17 #include "target/breakpoints.h"
18 #include "helper/time_support.h"
21 #include "rtos/rtos.h"
23 #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
24 #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
26 #define DIM(x) (sizeof(x)/sizeof(*x))
28 /* Constants for legacy SiFive hardware breakpoints. */
29 #define CSR_BPCONTROL_X (1<<0)
30 #define CSR_BPCONTROL_W (1<<1)
31 #define CSR_BPCONTROL_R (1<<2)
32 #define CSR_BPCONTROL_U (1<<3)
33 #define CSR_BPCONTROL_S (1<<4)
34 #define CSR_BPCONTROL_H (1<<5)
35 #define CSR_BPCONTROL_M (1<<6)
36 #define CSR_BPCONTROL_BPMATCH (0xf<<7)
37 #define CSR_BPCONTROL_BPACTION (0xff<<11)
39 #define DEBUG_ROM_START 0x800
40 #define DEBUG_ROM_RESUME (DEBUG_ROM_START + 4)
41 #define DEBUG_ROM_EXCEPTION (DEBUG_ROM_START + 8)
42 #define DEBUG_RAM_START 0x400
44 #define SETHALTNOT 0x10c
46 /*** JTAG registers. ***/
48 #define DTMCONTROL 0x10
49 #define DTMCONTROL_DBUS_RESET (1<<16)
50 #define DTMCONTROL_IDLE (7<<10)
51 #define DTMCONTROL_ADDRBITS (0xf<<4)
52 #define DTMCONTROL_VERSION (0xf)
55 #define DBUS_OP_START 0
56 #define DBUS_OP_SIZE 2
63 DBUS_STATUS_SUCCESS = 0,
64 DBUS_STATUS_FAILED = 2,
67 #define DBUS_DATA_START 2
68 #define DBUS_DATA_SIZE 34
69 #define DBUS_ADDRESS_START 36
77 /*** Debug Bus registers. ***/
79 #define DMCONTROL 0x10
80 #define DMCONTROL_INTERRUPT (((uint64_t)1)<<33)
81 #define DMCONTROL_HALTNOT (((uint64_t)1)<<32)
82 #define DMCONTROL_BUSERROR (7<<19)
83 #define DMCONTROL_SERIAL (3<<16)
84 #define DMCONTROL_AUTOINCREMENT (1<<15)
85 #define DMCONTROL_ACCESS (7<<12)
86 #define DMCONTROL_HARTID (0x3ff<<2)
87 #define DMCONTROL_NDRESET (1<<1)
88 #define DMCONTROL_FULLRESET 1
91 #define DMINFO_ABUSSIZE (0x7fU<<25)
92 #define DMINFO_SERIALCOUNT (0xf<<21)
93 #define DMINFO_ACCESS128 (1<<20)
94 #define DMINFO_ACCESS64 (1<<19)
95 #define DMINFO_ACCESS32 (1<<18)
96 #define DMINFO_ACCESS16 (1<<17)
97 #define DMINFO_ACCESS8 (1<<16)
98 #define DMINFO_DRAMSIZE (0x3f<<10)
99 #define DMINFO_AUTHENTICATED (1<<5)
100 #define DMINFO_AUTHBUSY (1<<4)
101 #define DMINFO_AUTHTYPE (3<<2)
102 #define DMINFO_VERSION 3
104 /*** Info about the core being debugged. ***/
106 #define DBUS_ADDRESS_UNKNOWN 0xffff
109 #define DRAM_CACHE_SIZE 16
/* Shared JTAG scan-field state used to address the DTM.  The select_*
 * fields' num_bits members are sized from the TAP's actual IR length in
 * riscv_init_target(). */
111 uint8_t ir_dtmcontrol[4] = {DTMCONTROL};
112 struct scan_field select_dtmcontrol = {
114 .out_value = ir_dtmcontrol
116 uint8_t ir_dbus[4] = {DBUS};
117 struct scan_field select_dbus = {
121 uint8_t ir_idcode[4] = {0x1};
122 struct scan_field select_idcode = {
124 .out_value = ir_idcode
/* BSCAN tunnel configuration: when bscan_tunnel_ir_width is nonzero, the
 * RISC-V TAP is reached through a BSCAN user register (USER4) instead of
 * being directly on the scan chain. */
127 bscan_tunnel_type_t bscan_tunnel_type;
128 int bscan_tunnel_ir_width; /* if zero, then tunneling is not present/active */
/* Constant one-bit/zero-bit shift buffers used as tunnel framing. */
130 static uint8_t bscan_zero[4] = {0};
131 static uint8_t bscan_one[4] = {1};
133 uint8_t ir_user4[4] = {0x23};
134 struct scan_field select_user4 = {
136 .out_value = ir_user4
/* Canned DR-scan sequences that steer the tunneled TAP to the DMI
 * register.  Widths marked "initialized in riscv_init_target" are patched
 * at init time once the DM's IR width is known. */
140 uint8_t bscan_tunneled_ir_width[4] = {5}; /* overridden by assignment in riscv_init_target */
141 struct scan_field _bscan_tunnel_data_register_select_dmi[] = {
144 .out_value = bscan_zero,
148 .num_bits = 5, /* initialized in riscv_init_target to ir width of DM */
149 .out_value = ir_dbus,
154 .out_value = bscan_tunneled_ir_width,
159 .out_value = bscan_zero,
/* Same selection sequence, but with the field order reversed for the
 * nested-TAP flavor of the tunnel. */
164 struct scan_field _bscan_tunnel_nested_tap_select_dmi[] = {
167 .out_value = bscan_zero,
172 .out_value = bscan_tunneled_ir_width,
176 .num_bits = 0, /* initialized in riscv_init_target to ir width of DM */
177 .out_value = ir_dbus,
182 .out_value = bscan_zero,
186 struct scan_field *bscan_tunnel_nested_tap_select_dmi = _bscan_tunnel_nested_tap_select_dmi;
187 uint32_t bscan_tunnel_nested_tap_select_dmi_num_fields = DIM(_bscan_tunnel_nested_tap_select_dmi);
189 struct scan_field *bscan_tunnel_data_register_select_dmi = _bscan_tunnel_data_register_select_dmi;
190 uint32_t bscan_tunnel_data_register_select_dmi_num_fields = DIM(_bscan_tunnel_data_register_select_dmi);
/* NOTE(review): the following line appears to belong to a struct definition
 * (likely struct trigger) whose opening is elided from this view. */
197 bool read, write, execute;
201 /* Wall-clock timeout for a command/access. Settable via RISC-V Target commands.*/
202 int riscv_command_timeout_sec = DEFAULT_COMMAND_TIMEOUT_SEC;
204 /* Wall-clock timeout after reset. Settable via RISC-V Target commands.*/
205 int riscv_reset_timeout_sec = DEFAULT_RESET_TIMEOUT_SEC;
/* User-configurable feature switches (set via Tcl commands). */
207 bool riscv_prefer_sba;
208 bool riscv_enable_virt2phys = true;
209 bool riscv_ebreakm = true;
210 bool riscv_ebreaks = true;
211 bool riscv_ebreaku = true;
213 bool riscv_enable_virtual;
219 /* In addition to the ones in the standard spec, we'll also expose additional
221 * The list is either NULL, or a series of ranges (inclusive), terminated with
224 /* Same, but for custom registers. */
225 range_t *expose_custom;
/* Page-table walk parameters for the Sv32/Sv39/Sv48 virtual-memory schemes;
 * per-level VPN/PPN shifts and masks are taken from the RISC-V privileged
 * specification's page-table entry layouts. */
232 virt2phys_info_t sv32 = {
237 .vpn_shift = {12, 22},
238 .vpn_mask = {0x3ff, 0x3ff},
239 .pte_ppn_shift = {10, 20},
240 .pte_ppn_mask = {0x3ff, 0xfff},
241 .pa_ppn_shift = {12, 22},
242 .pa_ppn_mask = {0x3ff, 0xfff},
245 virt2phys_info_t sv39 = {
250 .vpn_shift = {12, 21, 30},
251 .vpn_mask = {0x1ff, 0x1ff, 0x1ff},
252 .pte_ppn_shift = {10, 19, 28},
253 .pte_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
254 .pa_ppn_shift = {12, 21, 30},
255 .pa_ppn_mask = {0x1ff, 0x1ff, 0x3ffffff},
258 virt2phys_info_t sv48 = {
263 .vpn_shift = {12, 21, 30, 39},
264 .vpn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ff},
265 .pte_ppn_shift = {10, 19, 28, 37},
266 .pte_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
267 .pa_ppn_shift = {12, 21, 30, 39},
268 .pa_ppn_mask = {0x1ff, 0x1ff, 0x1ff, 0x1ffff},
271 static int riscv_resume_go_all_harts(struct target *target);
273 void select_dmi_via_bscan(struct target *target)
275 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
276 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
277 jtag_add_dr_scan(target->tap, bscan_tunnel_data_register_select_dmi_num_fields,
278 bscan_tunnel_data_register_select_dmi, TAP_IDLE);
279 else /* BSCAN_TUNNEL_NESTED_TAP */
280 jtag_add_dr_scan(target->tap, bscan_tunnel_nested_tap_select_dmi_num_fields,
281 bscan_tunnel_nested_tap_select_dmi, TAP_IDLE);
284 uint32_t dtmcontrol_scan_via_bscan(struct target *target, uint32_t out)
286 /* On BSCAN TAP: Select IR=USER4, issue tunneled IR scan via BSCAN TAP's DR */
287 uint8_t tunneled_ir_width[4] = {bscan_tunnel_ir_width};
288 uint8_t tunneled_dr_width[4] = {32};
289 uint8_t out_value[5] = {0};
290 uint8_t in_value[5] = {0};
292 buf_set_u32(out_value, 0, 32, out);
293 struct scan_field tunneled_ir[4] = {};
294 struct scan_field tunneled_dr[4] = {};
296 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
297 tunneled_ir[0].num_bits = 3;
298 tunneled_ir[0].out_value = bscan_zero;
299 tunneled_ir[0].in_value = NULL;
300 tunneled_ir[1].num_bits = bscan_tunnel_ir_width;
301 tunneled_ir[1].out_value = ir_dtmcontrol;
302 tunneled_ir[1].in_value = NULL;
303 tunneled_ir[2].num_bits = 7;
304 tunneled_ir[2].out_value = tunneled_ir_width;
305 tunneled_ir[2].in_value = NULL;
306 tunneled_ir[3].num_bits = 1;
307 tunneled_ir[3].out_value = bscan_zero;
308 tunneled_ir[3].in_value = NULL;
310 tunneled_dr[0].num_bits = 3;
311 tunneled_dr[0].out_value = bscan_zero;
312 tunneled_dr[0].in_value = NULL;
313 tunneled_dr[1].num_bits = 32 + 1;
314 tunneled_dr[1].out_value = out_value;
315 tunneled_dr[1].in_value = in_value;
316 tunneled_dr[2].num_bits = 7;
317 tunneled_dr[2].out_value = tunneled_dr_width;
318 tunneled_dr[2].in_value = NULL;
319 tunneled_dr[3].num_bits = 1;
320 tunneled_dr[3].out_value = bscan_one;
321 tunneled_dr[3].in_value = NULL;
323 /* BSCAN_TUNNEL_NESTED_TAP */
324 tunneled_ir[3].num_bits = 3;
325 tunneled_ir[3].out_value = bscan_zero;
326 tunneled_ir[3].in_value = NULL;
327 tunneled_ir[2].num_bits = bscan_tunnel_ir_width;
328 tunneled_ir[2].out_value = ir_dtmcontrol;
329 tunneled_ir[1].in_value = NULL;
330 tunneled_ir[1].num_bits = 7;
331 tunneled_ir[1].out_value = tunneled_ir_width;
332 tunneled_ir[2].in_value = NULL;
333 tunneled_ir[0].num_bits = 1;
334 tunneled_ir[0].out_value = bscan_zero;
335 tunneled_ir[0].in_value = NULL;
337 tunneled_dr[3].num_bits = 3;
338 tunneled_dr[3].out_value = bscan_zero;
339 tunneled_dr[3].in_value = NULL;
340 tunneled_dr[2].num_bits = 32 + 1;
341 tunneled_dr[2].out_value = out_value;
342 tunneled_dr[2].in_value = in_value;
343 tunneled_dr[1].num_bits = 7;
344 tunneled_dr[1].out_value = tunneled_dr_width;
345 tunneled_dr[1].in_value = NULL;
346 tunneled_dr[0].num_bits = 1;
347 tunneled_dr[0].out_value = bscan_one;
348 tunneled_dr[0].in_value = NULL;
350 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
351 jtag_add_dr_scan(target->tap, DIM(tunneled_ir), tunneled_ir, TAP_IDLE);
352 jtag_add_dr_scan(target->tap, DIM(tunneled_dr), tunneled_dr, TAP_IDLE);
353 select_dmi_via_bscan(target);
355 int retval = jtag_execute_queue();
356 if (retval != ERROR_OK) {
357 LOG_ERROR("failed jtag scan: %d", retval);
360 /* Note the starting offset is bit 1, not bit 0. In BSCAN tunnel, there is a one-bit TCK skew between
362 uint32_t in = buf_get_u32(in_value, 1, 32);
363 LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
/* Scan @p out through the 32-bit dtmcontrol (dtmcs) register and return the
 * value shifted back in.  Delegates to the BSCAN-tunneled variant when a
 * tunnel is configured.  Always leaves the TAP IR pointing at dbus so the
 * following DMI scans need no extra IR scan. */
370 static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
372 struct scan_field field;
374 uint8_t out_value[4] = { 0 };
376 if (bscan_tunnel_ir_width != 0)
377 return dtmcontrol_scan_via_bscan(target, out);
380 buf_set_u32(out_value, 0, 32, out);
382 jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
385 field.out_value = out_value;
386 field.in_value = in_value;
387 jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
389 /* Always return to dbus. */
390 jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
392 int retval = jtag_execute_queue();
393 if (retval != ERROR_OK) {
394 LOG_ERROR("failed jtag scan: %d", retval);
/* Bit 0 here, unlike the tunneled variant: no TCK skew on a direct scan. */
398 uint32_t in = buf_get_u32(field.in_value, 0, 32);
399 LOG_DEBUG("DTMCONTROL: 0x%x -> 0x%x", out, in);
/* Return the DTM-version-specific target_type vtable (0.11 vs 0.13) for
 * this target.  Logs an error when arch_info is missing or the discovered
 * DTM version is unsupported. */
404 static struct target_type *get_target_type(struct target *target)
406 riscv_info_t *info = (riscv_info_t *) target->arch_info;
409 LOG_ERROR("Target has not been initialized");
/* dtm_version was captured from dtmcontrol during examine. */
413 switch (info->dtm_version) {
415 return &riscv011_target;
417 return &riscv013_target;
419 LOG_ERROR("Unsupported DTM version: %d", info->dtm_version);
/* Allocate and initialize per-target RISC-V state, size the shared JTAG
 * scan fields from this TAP's IR length, finish BSCAN-tunnel setup, and
 * initialize semihosting support. */
424 static int riscv_init_target(struct command_context *cmd_ctx,
425 struct target *target)
427 LOG_DEBUG("riscv_init_target()");
428 target->arch_info = calloc(1, sizeof(riscv_info_t));
429 if (!target->arch_info)
431 riscv_info_t *info = (riscv_info_t *) target->arch_info;
432 riscv_info_init(target, info);
433 info->cmd_ctx = cmd_ctx;
/* These scan fields are file-scope globals shared by all scans. */
435 select_dtmcontrol.num_bits = target->tap->ir_length;
436 select_dbus.num_bits = target->tap->ir_length;
437 select_idcode.num_bits = target->tap->ir_length;
/* Patch the tunnel templates now that the tunneled IR width is known. */
439 if (bscan_tunnel_ir_width != 0) {
440 select_user4.num_bits = target->tap->ir_length;
441 bscan_tunneled_ir_width[0] = bscan_tunnel_ir_width;
442 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
443 bscan_tunnel_data_register_select_dmi[1].num_bits = bscan_tunnel_ir_width;
444 else /* BSCAN_TUNNEL_NESTED_TAP */
445 bscan_tunnel_nested_tap_select_dmi[2].num_bits = bscan_tunnel_ir_width;
448 riscv_semihosting_init(target);
450 target->debug_reason = DBG_REASON_DBGRQ;
/* Free the register cache.  reg_list[0].arch_info is one shared allocation
 * covering the standard registers; entries from GDB_REGNO_COUNT upward were
 * allocated individually and must be freed one by one. */
455 static void riscv_free_registers(struct target *target)
457 /* Free the shared structure use for most registers. */
458 if (target->reg_cache) {
459 if (target->reg_cache->reg_list) {
460 free(target->reg_cache->reg_list[0].arch_info);
461 /* Free the ones we allocated separately. */
462 for (unsigned i = GDB_REGNO_COUNT; i < target->reg_cache->num_regs; i++)
463 free(target->reg_cache->reg_list[i].arch_info);
464 free(target->reg_cache->reg_list);
466 free(target->reg_cache);
/* Tear down per-target state: let the version-specific driver clean up,
 * then release register names, the register cache, and arch_info itself. */
470 static void riscv_deinit_target(struct target *target)
472 LOG_DEBUG("riscv_deinit_target()");
473 struct target_type *tt = get_target_type(target);
475 tt->deinit_target(target);
476 riscv_info_t *info = (riscv_info_t *) target->arch_info;
477 free(info->reg_names);
481 riscv_free_registers(target);
/* Clear the pointer so stale arch_info is never reused after free. */
483 target->arch_info = NULL;
486 static void trigger_from_breakpoint(struct trigger *trigger,
487 const struct breakpoint *breakpoint)
489 trigger->address = breakpoint->address;
490 trigger->length = breakpoint->length;
491 trigger->mask = ~0LL;
492 trigger->read = false;
493 trigger->write = false;
494 trigger->execute = true;
495 /* unique_id is unique across both breakpoints and watchpoints. */
496 trigger->unique_id = breakpoint->unique_id;
/* Try to program the currently-selected trigger as a legacy SiFive type-1
 * (bpcontrol) breakpoint matching @p trigger on hart @p hartid.  Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when the trigger is occupied or the
 * hardware rejects the requested configuration (detected by reading tdata1
 * back and comparing). */
499 static int maybe_add_trigger_t1(struct target *target, unsigned hartid,
500 struct trigger *trigger, uint64_t tdata1)
/* Legacy bpcontrol bit layout (pre-ratification SiFive hardware). */
504 const uint32_t bpcontrol_x = 1<<0;
505 const uint32_t bpcontrol_w = 1<<1;
506 const uint32_t bpcontrol_r = 1<<2;
507 const uint32_t bpcontrol_u = 1<<3;
508 const uint32_t bpcontrol_s = 1<<4;
509 const uint32_t bpcontrol_h = 1<<5;
510 const uint32_t bpcontrol_m = 1<<6;
511 const uint32_t bpcontrol_bpmatch = 0xf << 7;
512 const uint32_t bpcontrol_bpaction = 0xff << 11;
514 if (tdata1 & (bpcontrol_r | bpcontrol_w | bpcontrol_x)) {
515 /* Trigger is already in use, presumably by user code. */
516 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
519 tdata1 = set_field(tdata1, bpcontrol_r, trigger->read);
520 tdata1 = set_field(tdata1, bpcontrol_w, trigger->write);
521 tdata1 = set_field(tdata1, bpcontrol_x, trigger->execute);
/* Only match in the privilege modes this hart actually implements
 * (taken from its misa extension bits). */
522 tdata1 = set_field(tdata1, bpcontrol_u,
523 !!(r->misa[hartid] & (1 << ('U' - 'A'))));
524 tdata1 = set_field(tdata1, bpcontrol_s,
525 !!(r->misa[hartid] & (1 << ('S' - 'A'))));
526 tdata1 = set_field(tdata1, bpcontrol_h,
527 !!(r->misa[hartid] & (1 << ('H' - 'A'))));
528 tdata1 |= bpcontrol_m;
529 tdata1 = set_field(tdata1, bpcontrol_bpmatch, 0); /* exact match */
530 tdata1 = set_field(tdata1, bpcontrol_bpaction, 0); /* cause bp exception */
532 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);
/* Read back to detect WARL fields the hardware silently rejected. */
534 riscv_reg_t tdata1_rb;
535 if (riscv_get_register_on_hart(target, &tdata1_rb, hartid,
536 GDB_REGNO_TDATA1) != ERROR_OK)
538 LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);
540 if (tdata1 != tdata1_rb) {
541 LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
542 PRIx64 " to tdata1 it contains 0x%" PRIx64,
/* Release the half-programmed trigger before giving up. */
544 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
545 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
548 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
/* Try to program the currently-selected trigger as a standard type-2
 * (mcontrol) match trigger for @p trigger on hart @p hartid.  Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when the trigger is occupied or the
 * hardware rejects the configuration (detected via tdata1 read-back). */
553 static int maybe_add_trigger_t2(struct target *target, unsigned hartid,
554 struct trigger *trigger, uint64_t tdata1)
558 /* tselect is already set */
559 if (tdata1 & (MCONTROL_EXECUTE | MCONTROL_STORE | MCONTROL_LOAD)) {
560 /* Trigger is already in use, presumably by user code. */
561 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
564 /* address/data match trigger */
565 tdata1 |= MCONTROL_DMODE(riscv_xlen(target));
/* Enter debug mode on hit, exact address match. */
566 tdata1 = set_field(tdata1, MCONTROL_ACTION,
567 MCONTROL_ACTION_DEBUG_MODE);
568 tdata1 = set_field(tdata1, MCONTROL_MATCH, MCONTROL_MATCH_EQUAL);
/* Match in every privilege mode the hart implements (per misa). */
569 tdata1 |= MCONTROL_M;
570 if (r->misa[hartid] & (1 << ('H' - 'A')))
571 tdata1 |= MCONTROL_H;
572 if (r->misa[hartid] & (1 << ('S' - 'A')))
573 tdata1 |= MCONTROL_S;
574 if (r->misa[hartid] & (1 << ('U' - 'A')))
575 tdata1 |= MCONTROL_U;
577 if (trigger->execute)
578 tdata1 |= MCONTROL_EXECUTE;
580 tdata1 |= MCONTROL_LOAD;
582 tdata1 |= MCONTROL_STORE;
584 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);
/* Read back to detect WARL fields the hardware silently rejected. */
587 int result = riscv_get_register_on_hart(target, &tdata1_rb, hartid, GDB_REGNO_TDATA1);
588 if (result != ERROR_OK)
590 LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);
592 if (tdata1 != tdata1_rb) {
593 LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
594 PRIx64 " to tdata1 it contains 0x%" PRIx64,
/* Release the half-programmed trigger before giving up. */
596 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
597 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
600 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
/* Find a free hardware trigger slot and program it with @p trigger on every
 * enabled hart (same slot index everywhere, so that in RTOS mode all
 * "threads" behave identically).  Saves and restores each hart's tselect.
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no slot can accept the
 * requested trigger. */
605 static int add_trigger(struct target *target, struct trigger *trigger)
609 if (riscv_enumerate_triggers(target) != ERROR_OK)
612 /* In RTOS mode, we need to set the same trigger in the same slot on every
613 * hart, to keep up the illusion that each hart is a thread running on the
616 /* Otherwise, we just set the trigger on the one hart this target deals
/* Save each hart's tselect so it can be restored when we're done. */
619 riscv_reg_t tselect[RISCV_MAX_HARTS];
622 for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
623 if (!riscv_hart_enabled(target, hartid))
627 int result = riscv_get_register_on_hart(target, &tselect[hartid],
628 hartid, GDB_REGNO_TSELECT);
629 if (result != ERROR_OK)
632 assert(first_hart >= 0);
/* Scan slots on the first hart for one we don't already occupy. */
635 for (i = 0; i < r->trigger_count[first_hart]; i++) {
636 if (r->trigger_unique_id[i] != -1)
639 riscv_set_register_on_hart(target, first_hart, GDB_REGNO_TSELECT, i);
642 int result = riscv_get_register_on_hart(target, &tdata1, first_hart,
644 if (result != ERROR_OK)
/* The trigger type determines which programming routine applies. */
646 int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
649 for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
650 if (!riscv_hart_enabled(target, hartid))
652 if (hartid > first_hart)
653 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
656 result = maybe_add_trigger_t1(target, hartid, trigger, tdata1);
659 result = maybe_add_trigger_t2(target, hartid, trigger, tdata1);
662 LOG_DEBUG("trigger %d has unknown type %d", i, type);
666 if (result != ERROR_OK)
670 if (result != ERROR_OK)
673 LOG_DEBUG("[%d] Using trigger %d (type %d) for bp %d", target->coreid,
674 i, type, trigger->unique_id);
/* Record the claim so remove_trigger() can find this slot later. */
675 r->trigger_unique_id[i] = trigger->unique_id;
/* Restore the original tselect on every hart. */
679 for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
680 if (!riscv_hart_enabled(target, hartid))
682 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT,
686 if (i >= r->trigger_count[first_hart]) {
687 LOG_ERROR("Couldn't find an available hardware trigger.");
688 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
/* Install a breakpoint: software breakpoints patch an ebreak/c.ebreak
 * instruction over the original code; hardware breakpoints claim a trigger
 * slot via add_trigger(). */
694 int riscv_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
696 LOG_DEBUG("[%d] @0x%" TARGET_PRIxADDR, target->coreid, breakpoint->address);
698 if (breakpoint->type == BKPT_SOFT) {
699 /** @todo check RVC for size/alignment */
/* Only 2-byte (compressed) and 4-byte instructions exist. */
700 if (!(breakpoint->length == 4 || breakpoint->length == 2)) {
701 LOG_ERROR("Invalid breakpoint length %d", breakpoint->length);
705 if (0 != (breakpoint->address % 2)) {
706 LOG_ERROR("Invalid breakpoint alignment for address 0x%" TARGET_PRIxADDR, breakpoint->address);
/* Save the original instruction so it can be restored on removal. */
710 if (target_read_memory(target, breakpoint->address, 2, breakpoint->length / 2,
711 breakpoint->orig_instr) != ERROR_OK) {
712 LOG_ERROR("Failed to read original instruction at 0x%" TARGET_PRIxADDR,
713 breakpoint->address);
/* Overwrite with ebreak (4-byte) or c.ebreak (2-byte). */
717 uint8_t buff[4] = { 0 };
718 buf_set_u32(buff, 0, breakpoint->length * CHAR_BIT, breakpoint->length == 4 ? ebreak() : ebreak_c());
719 int const retval = target_write_memory(target, breakpoint->address, 2, breakpoint->length / 2, buff);
721 if (retval != ERROR_OK) {
722 LOG_ERROR("Failed to write %d-byte breakpoint instruction at 0x%"
723 TARGET_PRIxADDR, breakpoint->length, breakpoint->address);
727 } else if (breakpoint->type == BKPT_HARD) {
728 struct trigger trigger;
729 trigger_from_breakpoint(&trigger, breakpoint);
730 int const result = add_trigger(target, &trigger);
731 if (result != ERROR_OK)
734 LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
735 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
738 breakpoint->set = true;
/* Release the hardware trigger slot previously claimed for @p trigger:
 * find the slot by unique_id, clear its tdata1 on every enabled hart
 * (preserving each hart's tselect), and mark the slot free. */
742 static int remove_trigger(struct target *target, struct trigger *trigger)
746 if (riscv_enumerate_triggers(target) != ERROR_OK)
750 for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
751 if (!riscv_hart_enabled(target, hartid))
753 if (first_hart < 0) {
758 assert(first_hart >= 0);
/* Locate the slot this trigger occupies. */
761 for (i = 0; i < r->trigger_count[first_hart]; i++) {
762 if (r->trigger_unique_id[i] == trigger->unique_id)
765 if (i >= r->trigger_count[first_hart]) {
766 LOG_ERROR("Couldn't find the hardware resources used by hardware "
770 LOG_DEBUG("[%d] Stop using resource %d for bp %d", target->coreid, i,
772 for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
773 if (!riscv_hart_enabled(target, hartid))
/* Save tselect, disable the slot by zeroing tdata1, restore tselect. */
776 int result = riscv_get_register_on_hart(target, &tselect, hartid, GDB_REGNO_TSELECT);
777 if (result != ERROR_OK)
779 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
780 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
781 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
783 r->trigger_unique_id[i] = -1;
/* Remove a breakpoint: software breakpoints restore the saved original
 * instruction; hardware breakpoints release their trigger slot. */
788 int riscv_remove_breakpoint(struct target *target,
789 struct breakpoint *breakpoint)
791 if (breakpoint->type == BKPT_SOFT) {
792 if (target_write_memory(target, breakpoint->address, 2, breakpoint->length / 2,
793 breakpoint->orig_instr) != ERROR_OK) {
794 LOG_ERROR("Failed to restore instruction for %d-byte breakpoint at "
795 "0x%" TARGET_PRIxADDR, breakpoint->length, breakpoint->address);
799 } else if (breakpoint->type == BKPT_HARD) {
800 struct trigger trigger;
801 trigger_from_breakpoint(&trigger, breakpoint);
802 int result = remove_trigger(target, &trigger);
803 if (result != ERROR_OK)
807 LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
808 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
811 breakpoint->set = false;
816 static void trigger_from_watchpoint(struct trigger *trigger,
817 const struct watchpoint *watchpoint)
819 trigger->address = watchpoint->address;
820 trigger->length = watchpoint->length;
821 trigger->mask = watchpoint->mask;
822 trigger->value = watchpoint->value;
823 trigger->read = (watchpoint->rw == WPT_READ || watchpoint->rw == WPT_ACCESS);
824 trigger->write = (watchpoint->rw == WPT_WRITE || watchpoint->rw == WPT_ACCESS);
825 trigger->execute = false;
826 /* unique_id is unique across both breakpoints and watchpoints. */
827 trigger->unique_id = watchpoint->unique_id;
/* Install a hardware watchpoint by claiming a trigger slot for it. */
830 int riscv_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
832 struct trigger trigger;
833 trigger_from_watchpoint(&trigger, watchpoint);
835 int result = add_trigger(target, &trigger);
836 if (result != ERROR_OK)
838 watchpoint->set = true;
/* Remove a hardware watchpoint by releasing its trigger slot. */
843 int riscv_remove_watchpoint(struct target *target,
844 struct watchpoint *watchpoint)
846 LOG_DEBUG("[%d] @0x%" TARGET_PRIxADDR, target->coreid, watchpoint->address);
848 struct trigger trigger;
849 trigger_from_watchpoint(&trigger, watchpoint);
851 int result = remove_trigger(target, &trigger);
852 if (result != ERROR_OK)
854 watchpoint->set = false;
859 /* Sets *hit_watchpoint to the first watchpoint identified as causing the
862 * The GDB server uses this information to tell GDB what data address has
863 * been hit, which enables GDB to print the hit variable along with its old
/* Strategy: read the instruction at dpc, decode which address an RV32I
 * load/store would access, and match that against the watchpoint list. */
865 int riscv_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
867 struct watchpoint *wp = target->watchpoints;
869 if (riscv_rtos_enabled(target))
870 riscv_set_current_hartid(target, target->rtos->current_thread - 1);
871 LOG_DEBUG("Current hartid = %d", riscv_current_hartid(target));
873 /*TODO instead of disassembling the instruction that we think caused the
874 * trigger, check the hit bit of each watchpoint first. The hit bit is
875 * simpler and more reliable to check but as it is optional and relatively
876 * new, not all hardware will implement it */
878 riscv_get_register(target, &dpc, GDB_REGNO_DPC);
879 const uint8_t length = 4;
880 LOG_DEBUG("dpc is 0x%" PRIx64, dpc);
882 /* fetch the instruction at dpc */
883 uint8_t buffer[length];
884 if (target_read_buffer(target, dpc, length, buffer) != ERROR_OK) {
885 LOG_ERROR("Failed to read instruction at dpc 0x%" PRIx64, dpc);
889 uint32_t instruction = 0;
/* Assemble little-endian bytes into a host-order word. */
891 for (int i = 0; i < length; i++) {
892 LOG_DEBUG("Next byte is %x", buffer[i]);
893 instruction += (buffer[i] << 8 * i);
895 LOG_DEBUG("Full instruction is %x", instruction);
897 /* find out which memory address is accessed by the instruction at dpc */
898 /* opcode is first 7 bits of the instruction */
899 uint8_t opcode = instruction & 0x7F;
902 riscv_reg_t mem_addr;
/* MATCH_LB/MATCH_SB have only the opcode bits set, so this comparison
 * classifies any load (LB/LH/LW/...) or store (SB/SH/SW/...). */
904 if (opcode == MATCH_LB || opcode == MATCH_SB) {
905 rs1 = (instruction & 0xf8000) >> 15;
906 riscv_get_register(target, &mem_addr, rs1);
908 if (opcode == MATCH_SB) {
/* S-type: immediate split across bits [11:7] and [31:25]. */
909 LOG_DEBUG("%x is store instruction", instruction);
910 imm = ((instruction & 0xf80) >> 7) | ((instruction & 0xfe000000) >> 20);
/* I-type: immediate in bits [31:20]. */
912 LOG_DEBUG("%x is load instruction", instruction);
913 imm = (instruction & 0xfff00000) >> 20;
915 /* sign extend 12-bit imm to 16-bits */
919 LOG_DEBUG("memory address=0x%" PRIx64, mem_addr);
921 LOG_DEBUG("%x is not a RV32I load or store", instruction);
926 /*TODO support length/mask */
927 if (wp->address == mem_addr) {
928 *hit_watchpoint = wp;
929 LOG_DEBUG("Hit address=%" TARGET_PRIxADDR, wp->address);
935 /* No match found - either we hit a watchpoint caused by an instruction that
936 * this function does not yet disassemble, or we hit a breakpoint.
938 * OpenOCD will behave as if this function had never been implemented i.e.
939 * report the halt to GDB with no address information. */
944 static int oldriscv_step(struct target *target, int current, uint32_t address,
945 int handle_breakpoints)
947 struct target_type *tt = get_target_type(target);
948 return tt->step(target, current, address, handle_breakpoints);
/* Single-step entry point: route to the legacy (0.11) driver when it does
 * not implement the newer is_halted interface, otherwise to the generic
 * OpenOCD step implementation. */
951 static int old_or_new_riscv_step(struct target *target, int current,
952 target_addr_t address, int handle_breakpoints)
955 LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
/* is_halted == NULL identifies a legacy (0.11) driver. */
956 if (r->is_halted == NULL)
957 return oldriscv_step(target, current, address, handle_breakpoints);
959 return riscv_openocd_step(target, current, address, handle_breakpoints);
/* Examine the target: read dtmcontrol to discover the DTM version, pick
 * the matching version-specific driver, and hand off to its init/examine
 * routines.  No-op if the target was already examined. */
963 static int riscv_examine(struct target *target)
965 LOG_DEBUG("riscv_examine()");
966 if (target_was_examined(target)) {
967 LOG_DEBUG("Target was already examined.");
971 /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
973 riscv_info_t *info = (riscv_info_t *) target->arch_info;
974 uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
975 LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
/* The version field selects between the 0.11 and 0.13 drivers. */
976 info->dtm_version = get_field(dtmcontrol, DTMCONTROL_VERSION);
977 LOG_DEBUG(" version=0x%x", info->dtm_version);
979 struct target_type *tt = get_target_type(target);
983 int result = tt->init_target(info->cmd_ctx, target);
984 if (result != ERROR_OK)
987 return tt->examine(target);
990 static int oldriscv_poll(struct target *target)
992 struct target_type *tt = get_target_type(target);
993 return tt->poll(target);
/* Poll entry point: route to the legacy (0.11) driver when it does not
 * implement the newer is_halted interface, otherwise to the generic
 * OpenOCD poll implementation. */
996 static int old_or_new_riscv_poll(struct target *target)
999 if (r->is_halted == NULL)
1000 return oldriscv_poll(target);
1002 return riscv_openocd_poll(target);
/* First stage of a multi-hart halt: run the driver's halt_prep on every
 * enabled hart that is not already halted, so the actual halt can later be
 * issued as close to simultaneously as possible. */
1005 int halt_prep(struct target *target)
1008 for (int i = 0; i < riscv_count_harts(target); ++i) {
1009 if (!riscv_hart_enabled(target, i))
1012 LOG_DEBUG("[%s] prep hart, debug_reason=%d", target_name(target),
1013 target->debug_reason);
1014 if (riscv_set_current_hartid(target, i) != ERROR_OK)
/* Already-halted harts need no preparation. */
1016 if (riscv_is_halted(target)) {
1017 LOG_DEBUG("Hart %d is already halted (reason=%d).", i,
1018 target->debug_reason);
1020 if (r->halt_prep(target) != ERROR_OK)
/* Second stage of a multi-hart halt: issue the driver's halt_go on every
 * enabled hart that is not already halted, then drop all cached register
 * state since the harts' contexts are now stale. */
1028 int riscv_halt_go_all_harts(struct target *target)
1031 for (int i = 0; i < riscv_count_harts(target); ++i) {
1032 if (!riscv_hart_enabled(target, i))
1035 if (riscv_set_current_hartid(target, i) != ERROR_OK)
1037 if (riscv_is_halted(target)) {
1038 LOG_DEBUG("Hart %d is already halted.", i);
1040 if (r->halt_go(target) != ERROR_OK)
1045 riscv_invalidate_register_cache(target);
/* Issue the halt: legacy (0.11) drivers get their own tt->halt, newer ones
 * go through riscv_halt_go_all_harts.  On success the target is marked
 * halted with DBG_REASON_DBGRQ unless a more specific reason is set. */
1050 int halt_go(struct target *target)
1052 riscv_info_t *r = riscv_info(target);
1054 if (r->is_halted == NULL) {
1055 struct target_type *tt = get_target_type(target);
1056 result = tt->halt(target);
1058 result = riscv_halt_go_all_harts(target);
1060 target->state = TARGET_HALTED;
1061 if (target->debug_reason == DBG_REASON_NOTHALTED)
1062 target->debug_reason = DBG_REASON_DBGRQ;
1067 static int halt_finish(struct target *target)
1069 return target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Public halt entry point.  Legacy (0.11) drivers are dispatched directly;
 * otherwise all harts (across the SMP target list when present) are halted
 * in three phases -- prep, go, finish -- so the stops land as close
 * together as possible.  In RTOS mode the current GDB thread is switched
 * to the hart that caused the halt. */
1072 int riscv_halt(struct target *target)
1076 if (r->is_halted == NULL) {
1077 struct target_type *tt = get_target_type(target);
1078 return tt->halt(target);
1081 LOG_DEBUG("[%d] halting all harts", target->coreid);
1083 int result = ERROR_OK;
/* Phase 1: prepare every target in the SMP group. */
1085 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1086 struct target *t = tlist->target;
1087 if (halt_prep(t) != ERROR_OK)
1088 result = ERROR_FAIL;
/* Phase 2: issue the halts. */
1091 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1092 struct target *t = tlist->target;
/* NOTE(review): 'i' appears unused in the lines visible here; its use
 * is presumably in elided lines -- confirm against the full file. */
1093 riscv_info_t *i = riscv_info(t);
1095 if (halt_go(t) != ERROR_OK)
1096 result = ERROR_FAIL;
/* Phase 3: deliver halted events. */
1100 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1101 struct target *t = tlist->target;
1102 if (halt_finish(t) != ERROR_OK)
/* Non-SMP: run the three phases on just this target. */
1107 if (halt_prep(target) != ERROR_OK)
1108 result = ERROR_FAIL;
1109 if (halt_go(target) != ERROR_OK)
1110 result = ERROR_FAIL;
1111 if (halt_finish(target) != ERROR_OK)
1115 if (riscv_rtos_enabled(target)) {
1116 if (r->rtos_hartid != -1) {
1117 LOG_DEBUG("halt requested on RTOS hartid %d", r->rtos_hartid);
/* GDB thread IDs are hartid + 1 (thread 0 is reserved). */
1118 target->rtos->current_threadid = r->rtos_hartid + 1;
1119 target->rtos->current_thread = r->rtos_hartid + 1;
1121 LOG_DEBUG("halt requested, but no known RTOS hartid");
1127 static int riscv_assert_reset(struct target *target)
1129 LOG_DEBUG("[%d]", target->coreid);
1130 struct target_type *tt = get_target_type(target);
1131 riscv_invalidate_register_cache(target);
1132 return tt->assert_reset(target);
1135 static int riscv_deassert_reset(struct target *target)
1137 LOG_DEBUG("[%d]", target->coreid);
1138 struct target_type *tt = get_target_type(target);
1139 return tt->deassert_reset(target);
/* First stage of a multi-hart resume: run the driver's resume_prep on
 * every enabled hart that is currently halted, then mark this target as
 * prepped. */
1142 int riscv_resume_prep_all_harts(struct target *target)
1145 for (int i = 0; i < riscv_count_harts(target); ++i) {
1146 if (!riscv_hart_enabled(target, i))
1149 LOG_DEBUG("prep hart %d", i);
1150 if (riscv_set_current_hartid(target, i) != ERROR_OK)
/* Only halted harts need (or can take) resume preparation. */
1152 if (riscv_is_halted(target)) {
1153 if (r->resume_prep(target) != ERROR_OK)
1156 LOG_DEBUG(" hart %d requested resume, but was already resumed", i);
1160 LOG_DEBUG("[%d] mark as prepped", target->coreid);
1166 /* state must be riscv_reg_t state[RISCV_MAX_HWBPS] = {0}; */
/* Temporarily disable all active triggers on the current hart, saving
 * enough information in @p state for enable_triggers() to restore them.
 * Two modes: when the user set triggers manually, save/clear raw tdata1
 * values; otherwise remove the watchpoints this driver manages. */
1167 static int disable_triggers(struct target *target, riscv_reg_t *state)
1171 LOG_DEBUG("deal with triggers");
1173 if (riscv_enumerate_triggers(target) != ERROR_OK)
1176 int hartid = riscv_current_hartid(target);
1177 if (r->manual_hwbp_set) {
1178 /* Look at every trigger that may have been set. */
1179 riscv_reg_t tselect;
1180 if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
1182 for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
1183 if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
1186 if (riscv_get_register(target, &tdata1, GDB_REGNO_TDATA1) != ERROR_OK)
/* Only debug-mode (dmode) triggers belong to the debugger; leave
 * any others alone. */
1188 if (tdata1 & MCONTROL_DMODE(riscv_xlen(target))) {
1190 if (riscv_set_register(target, GDB_REGNO_TDATA1, 0) != ERROR_OK)
1194 if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
1198 /* Just go through the triggers we manage. */
1199 struct watchpoint *watchpoint = target->watchpoints;
1201 while (watchpoint) {
1202 LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
/* Remember which watchpoints were live so they can be re-added. */
1203 state[i] = watchpoint->set;
1204 if (watchpoint->set) {
1205 if (riscv_remove_watchpoint(target, watchpoint) != ERROR_OK)
1208 watchpoint = watchpoint->next;
/* Re-enable the triggers that disable_triggers() saved into @p state:
 * restore raw tdata1 values in manual mode, or re-add the previously-set
 * watchpoints otherwise. */
1216 static int enable_triggers(struct target *target, riscv_reg_t *state)
1220 int hartid = riscv_current_hartid(target);
1222 if (r->manual_hwbp_set) {
1223 /* Look at every trigger that may have been set. */
1224 riscv_reg_t tselect;
1225 if (riscv_get_register(target, &tselect, GDB_REGNO_TSELECT) != ERROR_OK)
1227 for (unsigned t = 0; t < r->trigger_count[hartid]; t++) {
/* A zero entry means the slot was not in use. */
1228 if (state[t] != 0) {
1229 if (riscv_set_register(target, GDB_REGNO_TSELECT, t) != ERROR_OK)
1231 if (riscv_set_register(target, GDB_REGNO_TDATA1, state[t]) != ERROR_OK)
1235 if (riscv_set_register(target, GDB_REGNO_TSELECT, tselect) != ERROR_OK)
1239 struct watchpoint *watchpoint = target->watchpoints;
1241 while (watchpoint) {
1242 LOG_DEBUG("watchpoint %d: cleared=%" PRId64, i, state[i])
1244 if (riscv_add_watchpoint(target, watchpoint) != ERROR_OK)
1247 watchpoint = watchpoint->next;
1256 * Get everything ready to resume.
1258 static int resume_prep(struct target *target, int current,
1259 target_addr_t address, int handle_breakpoints, int debug_execution)
1262 LOG_DEBUG("[%d]", target->coreid);
1265 riscv_set_register(target, GDB_REGNO_PC, address);
1267 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1268 /* To be able to run off a trigger, disable all the triggers, step, and
1269 * then resume as usual. */
1270 riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
1272 if (disable_triggers(target, trigger_state) != ERROR_OK)
1275 if (old_or_new_riscv_step(target, true, 0, false) != ERROR_OK)
1278 if (enable_triggers(target, trigger_state) != ERROR_OK)
1283 if (riscv_resume_prep_all_harts(target) != ERROR_OK)
1287 LOG_DEBUG("[%d] mark as prepped", target->coreid);
1294 * Resume all the harts that have been prepped, as close to instantaneous as
1297 static int resume_go(struct target *target, int current,
1298 target_addr_t address, int handle_breakpoints, int debug_execution)
1300 riscv_info_t *r = riscv_info(target);
1302 if (r->is_halted == NULL) {
1303 struct target_type *tt = get_target_type(target);
1304 result = tt->resume(target, current, address, handle_breakpoints,
1307 result = riscv_resume_go_all_harts(target);
1313 static int resume_finish(struct target *target)
1315 register_cache_invalidate(target->reg_cache);
1317 target->state = TARGET_RUNNING;
1318 target->debug_reason = DBG_REASON_NOTHALTED;
1319 return target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1323 * @par single_hart When true, only resume a single hart even if SMP is
1324 * configured. This is used to run algorithms on just one hart.
1327 struct target *target,
1329 target_addr_t address,
1330 int handle_breakpoints,
1331 int debug_execution,
1334 LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
1335 int result = ERROR_OK;
1336 if (target->smp && !single_hart) {
1337 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1338 struct target *t = tlist->target;
1339 if (resume_prep(t, current, address, handle_breakpoints,
1340 debug_execution) != ERROR_OK)
1341 result = ERROR_FAIL;
1344 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1345 struct target *t = tlist->target;
1346 riscv_info_t *i = riscv_info(t);
1348 if (resume_go(t, current, address, handle_breakpoints,
1349 debug_execution) != ERROR_OK)
1350 result = ERROR_FAIL;
1354 for (struct target_list *tlist = target->head; tlist; tlist = tlist->next) {
1355 struct target *t = tlist->target;
1356 if (resume_finish(t) != ERROR_OK)
1361 if (resume_prep(target, current, address, handle_breakpoints,
1362 debug_execution) != ERROR_OK)
1363 result = ERROR_FAIL;
1364 if (resume_go(target, current, address, handle_breakpoints,
1365 debug_execution) != ERROR_OK)
1366 result = ERROR_FAIL;
1367 if (resume_finish(target) != ERROR_OK)
1374 static int riscv_target_resume(struct target *target, int current, target_addr_t address,
1375 int handle_breakpoints, int debug_execution)
1377 return riscv_resume(target, current, address, handle_breakpoints,
1378 debug_execution, false);
1381 static int riscv_select_current_hart(struct target *target)
1384 if (riscv_rtos_enabled(target)) {
1385 if (r->rtos_hartid == -1)
1386 r->rtos_hartid = target->rtos->current_threadid - 1;
1387 return riscv_set_current_hartid(target, r->rtos_hartid);
1389 return riscv_set_current_hartid(target, target->coreid);
1392 static int riscv_mmu(struct target *target, int *enabled)
1394 if (!riscv_enable_virt2phys) {
1399 if (riscv_rtos_enabled(target))
1400 riscv_set_current_hartid(target, target->rtos->current_thread - 1);
1402 /* Don't use MMU in explicit or effective M (machine) mode */
1404 if (riscv_get_register(target, &priv, GDB_REGNO_PRIV) != ERROR_OK) {
1405 LOG_ERROR("Failed to read priv register.");
1409 riscv_reg_t mstatus;
1410 if (riscv_get_register(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK) {
1411 LOG_ERROR("Failed to read mstatus register.");
1415 if ((get_field(mstatus, MSTATUS_MPRV) ? get_field(mstatus, MSTATUS_MPP) : priv) == PRV_M) {
1416 LOG_DEBUG("SATP/MMU ignored in Machine mode (mstatus=0x%" PRIx64 ").", mstatus);
1422 if (riscv_get_register(target, &satp, GDB_REGNO_SATP) != ERROR_OK) {
1423 LOG_DEBUG("Couldn't read SATP.");
1424 /* If we can't read SATP, then there must not be an MMU. */
1429 if (get_field(satp, RISCV_SATP_MODE(riscv_xlen(target))) == SATP_MODE_OFF) {
1430 LOG_DEBUG("MMU is disabled.");
1433 LOG_DEBUG("MMU is enabled.");
1440 static int riscv_address_translate(struct target *target,
1441 target_addr_t virtual, target_addr_t *physical)
1444 riscv_reg_t satp_value;
1447 target_addr_t table_address;
1448 virt2phys_info_t *info;
1452 if (riscv_rtos_enabled(target))
1453 riscv_set_current_hartid(target, target->rtos->current_thread - 1);
1455 int result = riscv_get_register(target, &satp_value, GDB_REGNO_SATP);
1456 if (result != ERROR_OK)
1459 unsigned xlen = riscv_xlen(target);
1460 mode = get_field(satp_value, RISCV_SATP_MODE(xlen));
1462 case SATP_MODE_SV32:
1465 case SATP_MODE_SV39:
1468 case SATP_MODE_SV48:
1472 LOG_ERROR("No translation or protection." \
1473 " (satp: 0x%" PRIx64 ")", satp_value);
1476 LOG_ERROR("The translation mode is not supported." \
1477 " (satp: 0x%" PRIx64 ")", satp_value);
1480 LOG_DEBUG("virtual=0x%" TARGET_PRIxADDR "; mode=%s", virtual, info->name);
1482 /* verify bits xlen-1:va_bits-1 are all equal */
1483 target_addr_t mask = ((target_addr_t)1 << (xlen - (info->va_bits - 1))) - 1;
1484 target_addr_t masked_msbs = (virtual >> (info->va_bits - 1)) & mask;
1485 if (masked_msbs != 0 && masked_msbs != mask) {
1486 LOG_ERROR("Virtual address 0x%" TARGET_PRIxADDR " is not sign-extended "
1487 "for %s mode.", virtual, info->name);
1491 ppn_value = get_field(satp_value, RISCV_SATP_PPN(xlen));
1492 table_address = ppn_value << RISCV_PGSHIFT;
1493 i = info->level - 1;
1495 uint64_t vpn = virtual >> info->vpn_shift[i];
1496 vpn &= info->vpn_mask[i];
1497 target_addr_t pte_address = table_address +
1498 (vpn << info->pte_shift);
1500 assert(info->pte_shift <= 3);
1501 int retval = r->read_memory(target, pte_address,
1502 4, (1 << info->pte_shift) / 4, buffer, 4);
1503 if (retval != ERROR_OK)
1506 if (info->pte_shift == 2)
1507 pte = buf_get_u32(buffer, 0, 32);
1509 pte = buf_get_u64(buffer, 0, 64);
1511 LOG_DEBUG("i=%d; PTE @0x%" TARGET_PRIxADDR " = 0x%" PRIx64, i,
1514 if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W)))
1517 if ((pte & PTE_R) || (pte & PTE_X)) /* Found leaf PTE. */
1523 ppn_value = pte >> PTE_PPN_SHIFT;
1524 table_address = ppn_value << RISCV_PGSHIFT;
1528 LOG_ERROR("Couldn't find the PTE.");
1532 /* Make sure to clear out the high bits that may be set. */
1533 *physical = virtual & (((target_addr_t)1 << info->va_bits) - 1);
1535 while (i < info->level) {
1536 ppn_value = pte >> info->pte_ppn_shift[i];
1537 ppn_value &= info->pte_ppn_mask[i];
1538 *physical &= ~(((target_addr_t)info->pa_ppn_mask[i]) <<
1539 info->pa_ppn_shift[i]);
1540 *physical |= (ppn_value << info->pa_ppn_shift[i]);
1543 LOG_DEBUG("0x%" TARGET_PRIxADDR " -> 0x%" TARGET_PRIxADDR, virtual,
1549 static int riscv_virt2phys(struct target *target, target_addr_t virtual, target_addr_t *physical)
1552 if (riscv_mmu(target, &enabled) == ERROR_OK) {
1556 if (riscv_address_translate(target, virtual, physical) == ERROR_OK)
1563 static int riscv_read_phys_memory(struct target *target, target_addr_t phys_address,
1564 uint32_t size, uint32_t count, uint8_t *buffer)
1567 if (riscv_select_current_hart(target) != ERROR_OK)
1569 return r->read_memory(target, phys_address, size, count, buffer, size);
1572 static int riscv_read_memory(struct target *target, target_addr_t address,
1573 uint32_t size, uint32_t count, uint8_t *buffer)
1576 LOG_WARNING("0-length read from 0x%" TARGET_PRIxADDR, address);
1580 if (riscv_select_current_hart(target) != ERROR_OK)
1583 target_addr_t physical_addr;
1584 if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
1585 address = physical_addr;
1588 return r->read_memory(target, address, size, count, buffer, size);
1591 static int riscv_write_phys_memory(struct target *target, target_addr_t phys_address,
1592 uint32_t size, uint32_t count, const uint8_t *buffer)
1594 if (riscv_select_current_hart(target) != ERROR_OK)
1596 struct target_type *tt = get_target_type(target);
1597 return tt->write_memory(target, phys_address, size, count, buffer);
1600 static int riscv_write_memory(struct target *target, target_addr_t address,
1601 uint32_t size, uint32_t count, const uint8_t *buffer)
1604 LOG_WARNING("0-length write to 0x%" TARGET_PRIxADDR, address);
1608 if (riscv_select_current_hart(target) != ERROR_OK)
1611 target_addr_t physical_addr;
1612 if (target->type->virt2phys(target, address, &physical_addr) == ERROR_OK)
1613 address = physical_addr;
1615 struct target_type *tt = get_target_type(target);
1616 return tt->write_memory(target, address, size, count, buffer);
1619 static int riscv_get_gdb_reg_list_internal(struct target *target,
1620 struct reg **reg_list[], int *reg_list_size,
1621 enum target_register_class reg_class, bool read)
1624 LOG_DEBUG("rtos_hartid=%d, current_hartid=%d, reg_class=%d, read=%d",
1625 r->rtos_hartid, r->current_hartid, reg_class, read);
1627 if (!target->reg_cache) {
1628 LOG_ERROR("Target not initialized. Return ERROR_FAIL.");
1632 if (riscv_select_current_hart(target) != ERROR_OK)
1635 switch (reg_class) {
1636 case REG_CLASS_GENERAL:
1637 *reg_list_size = 33;
1640 *reg_list_size = target->reg_cache->num_regs;
1643 LOG_ERROR("Unsupported reg_class: %d", reg_class);
1647 *reg_list = calloc(*reg_list_size, sizeof(struct reg *));
1651 for (int i = 0; i < *reg_list_size; i++) {
1652 assert(!target->reg_cache->reg_list[i].valid ||
1653 target->reg_cache->reg_list[i].size > 0);
1654 (*reg_list)[i] = &target->reg_cache->reg_list[i];
1656 target->reg_cache->reg_list[i].exist &&
1657 !target->reg_cache->reg_list[i].valid) {
1658 if (target->reg_cache->reg_list[i].type->get(
1659 &target->reg_cache->reg_list[i]) != ERROR_OK)
1667 static int riscv_get_gdb_reg_list_noread(struct target *target,
1668 struct reg **reg_list[], int *reg_list_size,
1669 enum target_register_class reg_class)
1671 return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
1675 static int riscv_get_gdb_reg_list(struct target *target,
1676 struct reg **reg_list[], int *reg_list_size,
1677 enum target_register_class reg_class)
1679 return riscv_get_gdb_reg_list_internal(target, reg_list, reg_list_size,
1683 static int riscv_arch_state(struct target *target)
1685 struct target_type *tt = get_target_type(target);
1686 return tt->arch_state(target);
1689 /* Algorithm must end with a software breakpoint instruction. */
1690 static int riscv_run_algorithm(struct target *target, int num_mem_params,
1691 struct mem_param *mem_params, int num_reg_params,
1692 struct reg_param *reg_params, target_addr_t entry_point,
1693 target_addr_t exit_point, int timeout_ms, void *arch_info)
1695 riscv_info_t *info = (riscv_info_t *) target->arch_info;
1696 int hartid = riscv_current_hartid(target);
1698 if (num_mem_params > 0) {
1699 LOG_ERROR("Memory parameters are not supported for RISC-V algorithms.");
1703 if (target->state != TARGET_HALTED) {
1704 LOG_WARNING("target not halted");
1705 return ERROR_TARGET_NOT_HALTED;
1708 /* Save registers */
1709 struct reg *reg_pc = register_get_by_name(target->reg_cache, "pc", 1);
1710 if (!reg_pc || reg_pc->type->get(reg_pc) != ERROR_OK)
1712 uint64_t saved_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
1713 LOG_DEBUG("saved_pc=0x%" PRIx64, saved_pc);
1715 uint64_t saved_regs[32];
1716 for (int i = 0; i < num_reg_params; i++) {
1717 LOG_DEBUG("save %s", reg_params[i].reg_name);
1718 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
1720 LOG_ERROR("Couldn't find register named '%s'", reg_params[i].reg_name);
1724 if (r->size != reg_params[i].size) {
1725 LOG_ERROR("Register %s is %d bits instead of %d bits.",
1726 reg_params[i].reg_name, r->size, reg_params[i].size);
1730 if (r->number > GDB_REGNO_XPR31) {
1731 LOG_ERROR("Only GPRs can be use as argument registers.");
1735 if (r->type->get(r) != ERROR_OK)
1737 saved_regs[r->number] = buf_get_u64(r->value, 0, r->size);
1739 if (reg_params[i].direction == PARAM_OUT || reg_params[i].direction == PARAM_IN_OUT) {
1740 if (r->type->set(r, reg_params[i].value) != ERROR_OK)
1746 /* Disable Interrupts before attempting to run the algorithm. */
1747 uint64_t current_mstatus;
1748 uint8_t mstatus_bytes[8] = { 0 };
1750 LOG_DEBUG("Disabling Interrupts");
1751 struct reg *reg_mstatus = register_get_by_name(target->reg_cache,
1754 LOG_ERROR("Couldn't find mstatus!");
1758 reg_mstatus->type->get(reg_mstatus);
1759 current_mstatus = buf_get_u64(reg_mstatus->value, 0, reg_mstatus->size);
1760 uint64_t ie_mask = MSTATUS_MIE | MSTATUS_HIE | MSTATUS_SIE | MSTATUS_UIE;
1761 buf_set_u64(mstatus_bytes, 0, info->xlen[0], set_field(current_mstatus,
1764 reg_mstatus->type->set(reg_mstatus, mstatus_bytes);
1767 LOG_DEBUG("resume at 0x%" TARGET_PRIxADDR, entry_point);
1768 if (riscv_resume(target, 0, entry_point, 0, 0, true) != ERROR_OK)
1771 int64_t start = timeval_ms();
1772 while (target->state != TARGET_HALTED) {
1773 LOG_DEBUG("poll()");
1774 int64_t now = timeval_ms();
1775 if (now - start > timeout_ms) {
1776 LOG_ERROR("Algorithm timed out after %" PRId64 " ms.", now - start);
1778 old_or_new_riscv_poll(target);
1779 enum gdb_regno regnums[] = {
1780 GDB_REGNO_RA, GDB_REGNO_SP, GDB_REGNO_GP, GDB_REGNO_TP,
1781 GDB_REGNO_T0, GDB_REGNO_T1, GDB_REGNO_T2, GDB_REGNO_FP,
1782 GDB_REGNO_S1, GDB_REGNO_A0, GDB_REGNO_A1, GDB_REGNO_A2,
1783 GDB_REGNO_A3, GDB_REGNO_A4, GDB_REGNO_A5, GDB_REGNO_A6,
1784 GDB_REGNO_A7, GDB_REGNO_S2, GDB_REGNO_S3, GDB_REGNO_S4,
1785 GDB_REGNO_S5, GDB_REGNO_S6, GDB_REGNO_S7, GDB_REGNO_S8,
1786 GDB_REGNO_S9, GDB_REGNO_S10, GDB_REGNO_S11, GDB_REGNO_T3,
1787 GDB_REGNO_T4, GDB_REGNO_T5, GDB_REGNO_T6,
1789 GDB_REGNO_MSTATUS, GDB_REGNO_MEPC, GDB_REGNO_MCAUSE,
1791 for (unsigned i = 0; i < DIM(regnums); i++) {
1792 enum gdb_regno regno = regnums[i];
1793 riscv_reg_t reg_value;
1794 if (riscv_get_register(target, ®_value, regno) != ERROR_OK)
1796 LOG_ERROR("%s = 0x%" PRIx64, gdb_regno_name(regno), reg_value);
1798 return ERROR_TARGET_TIMEOUT;
1801 int result = old_or_new_riscv_poll(target);
1802 if (result != ERROR_OK)
1806 /* The current hart id might have been changed in poll(). */
1807 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
1810 if (reg_pc->type->get(reg_pc) != ERROR_OK)
1812 uint64_t final_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
1813 if (exit_point && final_pc != exit_point) {
1814 LOG_ERROR("PC ended up at 0x%" PRIx64 " instead of 0x%"
1815 TARGET_PRIxADDR, final_pc, exit_point);
1819 /* Restore Interrupts */
1820 LOG_DEBUG("Restoring Interrupts");
1821 buf_set_u64(mstatus_bytes, 0, info->xlen[0], current_mstatus);
1822 reg_mstatus->type->set(reg_mstatus, mstatus_bytes);
1824 /* Restore registers */
1825 uint8_t buf[8] = { 0 };
1826 buf_set_u64(buf, 0, info->xlen[0], saved_pc);
1827 if (reg_pc->type->set(reg_pc, buf) != ERROR_OK)
1830 for (int i = 0; i < num_reg_params; i++) {
1831 if (reg_params[i].direction == PARAM_IN ||
1832 reg_params[i].direction == PARAM_IN_OUT) {
1833 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
1834 if (r->type->get(r) != ERROR_OK) {
1835 LOG_ERROR("get(%s) failed", r->name);
1838 buf_cpy(r->value, reg_params[i].value, reg_params[i].size);
1840 LOG_DEBUG("restore %s", reg_params[i].reg_name);
1841 struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
1842 buf_set_u64(buf, 0, info->xlen[0], saved_regs[r->number]);
1843 if (r->type->set(r, buf) != ERROR_OK) {
1844 LOG_ERROR("set(%s) failed", r->name);
1852 static int riscv_checksum_memory(struct target *target,
1853 target_addr_t address, uint32_t count,
1856 struct working_area *crc_algorithm;
1857 struct reg_param reg_params[2];
1860 LOG_DEBUG("address=0x%" TARGET_PRIxADDR "; count=0x%" PRIx32, address, count);
1862 static const uint8_t riscv32_crc_code[] = {
1863 #include "../../contrib/loaders/checksum/riscv32_crc.inc"
1865 static const uint8_t riscv64_crc_code[] = {
1866 #include "../../contrib/loaders/checksum/riscv64_crc.inc"
1869 static const uint8_t *crc_code;
1871 unsigned xlen = riscv_xlen(target);
1872 unsigned crc_code_size;
1874 crc_code = riscv32_crc_code;
1875 crc_code_size = sizeof(riscv32_crc_code);
1877 crc_code = riscv64_crc_code;
1878 crc_code_size = sizeof(riscv64_crc_code);
1881 if (count < crc_code_size * 4) {
1882 /* Don't use the algorithm for relatively small buffers. It's faster
1883 * just to read the memory. target_checksum_memory() will take care of
1884 * that if we fail. */
1888 retval = target_alloc_working_area(target, crc_code_size, &crc_algorithm);
1889 if (retval != ERROR_OK)
1892 if (crc_algorithm->address + crc_algorithm->size > address &&
1893 crc_algorithm->address < address + count) {
1894 /* Region to checksum overlaps with the work area we've been assigned.
1895 * Bail. (Would be better to manually checksum what we read there, and
1896 * use the algorithm for the rest.) */
1897 target_free_working_area(target, crc_algorithm);
1901 retval = target_write_buffer(target, crc_algorithm->address, crc_code_size,
1903 if (retval != ERROR_OK) {
1904 LOG_ERROR("Failed to write code to " TARGET_ADDR_FMT ": %d",
1905 crc_algorithm->address, retval);
1906 target_free_working_area(target, crc_algorithm);
1910 init_reg_param(®_params[0], "a0", xlen, PARAM_IN_OUT);
1911 init_reg_param(®_params[1], "a1", xlen, PARAM_OUT);
1912 buf_set_u64(reg_params[0].value, 0, xlen, address);
1913 buf_set_u64(reg_params[1].value, 0, xlen, count);
1915 /* 20 second timeout/megabyte */
1916 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1918 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1919 crc_algorithm->address,
1920 0, /* Leave exit point unspecified because we don't know. */
1923 if (retval == ERROR_OK)
1924 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1926 LOG_ERROR("error executing RISC-V CRC algorithm");
1928 destroy_reg_param(®_params[0]);
1929 destroy_reg_param(®_params[1]);
1931 target_free_working_area(target, crc_algorithm);
1933 LOG_DEBUG("checksum=0x%" PRIx32 ", result=%d", *checksum, retval);
1938 /*** OpenOCD Helper Functions ***/
/* Outcome of polling one hart: nothing changed, a state transition was
 * discovered, or communication failed.  (RPH_NO_CHANGE and RPH_ERROR are
 * used by riscv_poll_hart() and riscv_openocd_poll() below.) */
enum riscv_poll_hart {
	RPH_NO_CHANGE,
	RPH_DISCOVERED_HALTED,
	RPH_DISCOVERED_RUNNING,
	RPH_ERROR
};
1946 static enum riscv_poll_hart riscv_poll_hart(struct target *target, int hartid)
1949 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
1952 LOG_DEBUG("polling hart %d, target->state=%d", hartid, target->state);
1954 /* If OpenOCD thinks we're running but this hart is halted then it's time
1955 * to raise an event. */
1956 bool halted = riscv_is_halted(target);
1957 if (target->state != TARGET_HALTED && halted) {
1958 LOG_DEBUG(" triggered a halt");
1960 return RPH_DISCOVERED_HALTED;
1961 } else if (target->state != TARGET_RUNNING && !halted) {
1962 LOG_DEBUG(" triggered running");
1963 target->state = TARGET_RUNNING;
1964 target->debug_reason = DBG_REASON_NOTHALTED;
1965 return RPH_DISCOVERED_RUNNING;
1968 return RPH_NO_CHANGE;
1971 int set_debug_reason(struct target *target, enum riscv_halt_reason halt_reason)
1973 switch (halt_reason) {
1974 case RISCV_HALT_BREAKPOINT:
1975 target->debug_reason = DBG_REASON_BREAKPOINT;
1977 case RISCV_HALT_TRIGGER:
1978 target->debug_reason = DBG_REASON_WATCHPOINT;
1980 case RISCV_HALT_INTERRUPT:
1981 case RISCV_HALT_GROUP:
1982 target->debug_reason = DBG_REASON_DBGRQ;
1984 case RISCV_HALT_SINGLESTEP:
1985 target->debug_reason = DBG_REASON_SINGLESTEP;
1987 case RISCV_HALT_UNKNOWN:
1988 target->debug_reason = DBG_REASON_UNDEFINED;
1990 case RISCV_HALT_ERROR:
1993 LOG_DEBUG("[%s] debug_reason=%d", target_name(target), target->debug_reason);
1997 /*** OpenOCD Interface ***/
1998 int riscv_openocd_poll(struct target *target)
2000 LOG_DEBUG("polling all harts");
2001 int halted_hart = -1;
2002 if (riscv_rtos_enabled(target)) {
2003 /* Check every hart for an event. */
2004 for (int i = 0; i < riscv_count_harts(target); ++i) {
2005 enum riscv_poll_hart out = riscv_poll_hart(target, i);
2008 case RPH_DISCOVERED_RUNNING:
2010 case RPH_DISCOVERED_HALTED:
2017 if (halted_hart == -1) {
2018 LOG_DEBUG(" no harts just halted, target->state=%d", target->state);
2021 LOG_DEBUG(" hart %d halted", halted_hart);
2023 target->state = TARGET_HALTED;
2024 enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
2025 if (set_debug_reason(target, halt_reason) != ERROR_OK)
2028 target->rtos->current_threadid = halted_hart + 1;
2029 target->rtos->current_thread = halted_hart + 1;
2030 riscv_set_rtos_hartid(target, halted_hart);
2032 /* If we're here then at least one hart triggered. That means we want
2033 * to go and halt _every_ hart (configured with -rtos riscv) in the
2034 * system, as that's the invariant we hold here. Some harts might have
2035 * already halted (as we're either in single-step mode or they also
2036 * triggered a breakpoint), so don't attempt to halt those harts.
2037 * riscv_halt() will do all that for us. */
2040 } else if (target->smp) {
2041 unsigned halts_discovered = 0;
2042 unsigned total_targets = 0;
2043 unsigned should_remain_halted = 0;
2044 unsigned should_resume = 0;
2046 for (struct target_list *list = target->head; list != NULL;
2047 list = list->next, i++) {
2049 struct target *t = list->target;
2050 riscv_info_t *r = riscv_info(t);
2051 enum riscv_poll_hart out = riscv_poll_hart(t, r->current_hartid);
2055 case RPH_DISCOVERED_RUNNING:
2056 t->state = TARGET_RUNNING;
2057 t->debug_reason = DBG_REASON_NOTHALTED;
2059 case RPH_DISCOVERED_HALTED:
2061 t->state = TARGET_HALTED;
2062 enum riscv_halt_reason halt_reason =
2063 riscv_halt_reason(t, r->current_hartid);
2064 if (set_debug_reason(t, halt_reason) != ERROR_OK)
2067 if (halt_reason == RISCV_HALT_BREAKPOINT) {
2069 switch (riscv_semihosting(t, &retval)) {
2072 /* This hart should remain halted. */
2073 should_remain_halted++;
2076 /* This hart should be resumed, along with any other
2077 * harts that halted due to haltgroups. */
2083 } else if (halt_reason != RISCV_HALT_GROUP) {
2084 should_remain_halted++;
2093 LOG_DEBUG("should_remain_halted=%d, should_resume=%d",
2094 should_remain_halted, should_resume);
2095 if (should_remain_halted && should_resume) {
2096 LOG_WARNING("%d harts should remain halted, and %d should resume.",
2097 should_remain_halted, should_resume);
2099 if (should_remain_halted) {
2100 LOG_DEBUG("halt all");
2102 } else if (should_resume) {
2103 LOG_DEBUG("resume all");
2104 riscv_resume(target, true, 0, 0, 0, false);
2109 enum riscv_poll_hart out = riscv_poll_hart(target,
2110 riscv_current_hartid(target));
2111 if (out == RPH_NO_CHANGE || out == RPH_DISCOVERED_RUNNING)
2113 else if (out == RPH_ERROR)
2116 halted_hart = riscv_current_hartid(target);
2117 LOG_DEBUG(" hart %d halted", halted_hart);
2119 enum riscv_halt_reason halt_reason = riscv_halt_reason(target, halted_hart);
2120 if (set_debug_reason(target, halt_reason) != ERROR_OK)
2122 target->state = TARGET_HALTED;
2125 if (target->debug_reason == DBG_REASON_BREAKPOINT) {
2127 switch (riscv_semihosting(target, &retval)) {
2130 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2133 if (riscv_resume(target, true, 0, 0, 0, false) != ERROR_OK)
2140 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2146 int riscv_openocd_step(struct target *target, int current,
2147 target_addr_t address, int handle_breakpoints)
2149 LOG_DEBUG("stepping rtos hart");
2152 riscv_set_register(target, GDB_REGNO_PC, address);
2154 riscv_reg_t trigger_state[RISCV_MAX_HWBPS] = {0};
2155 if (disable_triggers(target, trigger_state) != ERROR_OK)
2158 int out = riscv_step_rtos_hart(target);
2159 if (out != ERROR_OK) {
2160 LOG_ERROR("unable to step rtos hart");
2164 register_cache_invalidate(target->reg_cache);
2166 if (enable_triggers(target, trigger_state) != ERROR_OK)
2169 target->state = TARGET_RUNNING;
2170 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
2171 target->state = TARGET_HALTED;
2172 target->debug_reason = DBG_REASON_SINGLESTEP;
2173 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2177 /* Command Handlers */
2178 COMMAND_HANDLER(riscv_set_command_timeout_sec)
2180 if (CMD_ARGC != 1) {
2181 LOG_ERROR("Command takes exactly 1 parameter");
2182 return ERROR_COMMAND_SYNTAX_ERROR;
2184 int timeout = atoi(CMD_ARGV[0]);
2186 LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
2190 riscv_command_timeout_sec = timeout;
2195 COMMAND_HANDLER(riscv_set_reset_timeout_sec)
2197 if (CMD_ARGC != 1) {
2198 LOG_ERROR("Command takes exactly 1 parameter");
2199 return ERROR_COMMAND_SYNTAX_ERROR;
2201 int timeout = atoi(CMD_ARGV[0]);
2203 LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
2207 riscv_reset_timeout_sec = timeout;
2211 COMMAND_HANDLER(riscv_test_compliance) {
2213 struct target *target = get_current_target(CMD_CTX);
2218 LOG_ERROR("Command does not take any parameters.");
2219 return ERROR_COMMAND_SYNTAX_ERROR;
2222 if (r->test_compliance) {
2223 return r->test_compliance(target);
2225 LOG_ERROR("This target does not support this command (may implement an older version of the spec).");
2230 COMMAND_HANDLER(riscv_set_prefer_sba)
2232 if (CMD_ARGC != 1) {
2233 LOG_ERROR("Command takes exactly 1 parameter");
2234 return ERROR_COMMAND_SYNTAX_ERROR;
2236 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_prefer_sba);
2240 COMMAND_HANDLER(riscv_set_enable_virtual)
2242 if (CMD_ARGC != 1) {
2243 LOG_ERROR("Command takes exactly 1 parameter");
2244 return ERROR_COMMAND_SYNTAX_ERROR;
2246 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virtual);
/* Log a parse error, pointing a caret at the offending character. */
void parse_error(const char *string, char c, unsigned position)
{
	/* Build "    ^" with the caret under column `position`. */
	char buf[position + 2];
	for (unsigned i = 0; i < position; i++)
		buf[i] = ' ';
	buf[position] = '^';
	buf[position + 1] = 0;

	LOG_ERROR("Parse error at character %c in:", c);
	LOG_ERROR("%s", string);
	LOG_ERROR("%s", buf);
}
2263 int parse_ranges(range_t **ranges, const char **argv)
2265 for (unsigned pass = 0; pass < 2; pass++) {
2268 bool parse_low = true;
2270 for (unsigned i = 0; i == 0 || argv[0][i-1]; i++) {
2271 char c = argv[0][i];
2273 /* Ignore whitespace. */
2281 } else if (c == '-') {
2283 } else if (c == ',' || c == 0) {
2285 (*ranges)[range].low = low;
2286 (*ranges)[range].high = low;
2291 parse_error(argv[0], c, i);
2292 return ERROR_COMMAND_SYNTAX_ERROR;
2299 } else if (c == ',' || c == 0) {
2302 (*ranges)[range].low = low;
2303 (*ranges)[range].high = high;
2309 parse_error(argv[0], c, i);
2310 return ERROR_COMMAND_SYNTAX_ERROR;
2317 *ranges = calloc(range + 2, sizeof(range_t));
2321 (*ranges)[range].low = 1;
2322 (*ranges)[range].high = 0;
2329 COMMAND_HANDLER(riscv_set_expose_csrs)
2331 if (CMD_ARGC != 1) {
2332 LOG_ERROR("Command takes exactly 1 parameter");
2333 return ERROR_COMMAND_SYNTAX_ERROR;
2336 return parse_ranges(&expose_csr, CMD_ARGV);
2339 COMMAND_HANDLER(riscv_set_expose_custom)
2341 if (CMD_ARGC != 1) {
2342 LOG_ERROR("Command takes exactly 1 parameter");
2343 return ERROR_COMMAND_SYNTAX_ERROR;
2346 return parse_ranges(&expose_custom, CMD_ARGV);
2349 COMMAND_HANDLER(riscv_authdata_read)
2351 if (CMD_ARGC != 0) {
2352 LOG_ERROR("Command takes no parameters");
2353 return ERROR_COMMAND_SYNTAX_ERROR;
2356 struct target *target = get_current_target(CMD_CTX);
2358 LOG_ERROR("target is NULL!");
2364 LOG_ERROR("riscv_info is NULL!");
2368 if (r->authdata_read) {
2370 if (r->authdata_read(target, &value) != ERROR_OK)
2372 command_print_sameline(CMD, "0x%08" PRIx32, value);
2375 LOG_ERROR("authdata_read is not implemented for this target.");
2380 COMMAND_HANDLER(riscv_authdata_write)
2382 if (CMD_ARGC != 1) {
2383 LOG_ERROR("Command takes exactly 1 argument");
2384 return ERROR_COMMAND_SYNTAX_ERROR;
2387 struct target *target = get_current_target(CMD_CTX);
2391 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], value);
2393 if (r->authdata_write) {
2394 return r->authdata_write(target, value);
2396 LOG_ERROR("authdata_write is not implemented for this target.");
2401 COMMAND_HANDLER(riscv_dmi_read)
2403 if (CMD_ARGC != 1) {
2404 LOG_ERROR("Command takes 1 parameter");
2405 return ERROR_COMMAND_SYNTAX_ERROR;
2408 struct target *target = get_current_target(CMD_CTX);
2410 LOG_ERROR("target is NULL!");
2416 LOG_ERROR("riscv_info is NULL!");
2421 uint32_t address, value;
2422 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2423 if (r->dmi_read(target, &value, address) != ERROR_OK)
2425 command_print(CMD, "0x%" PRIx32, value);
2428 LOG_ERROR("dmi_read is not implemented for this target.");
2434 COMMAND_HANDLER(riscv_dmi_write)
2436 if (CMD_ARGC != 2) {
2437 LOG_ERROR("Command takes exactly 2 arguments");
2438 return ERROR_COMMAND_SYNTAX_ERROR;
2441 struct target *target = get_current_target(CMD_CTX);
2444 uint32_t address, value;
2445 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
2446 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2449 return r->dmi_write(target, address, value);
2451 LOG_ERROR("dmi_write is not implemented for this target.");
2456 COMMAND_HANDLER(riscv_test_sba_config_reg)
2458 if (CMD_ARGC != 4) {
2459 LOG_ERROR("Command takes exactly 4 arguments");
2460 return ERROR_COMMAND_SYNTAX_ERROR;
2463 struct target *target = get_current_target(CMD_CTX);
2466 target_addr_t legal_address;
2468 target_addr_t illegal_address;
2469 bool run_sbbusyerror_test;
2471 COMMAND_PARSE_NUMBER(target_addr, CMD_ARGV[0], legal_address);
2472 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], num_words);
2473 COMMAND_PARSE_NUMBER(target_addr, CMD_ARGV[2], illegal_address);
2474 COMMAND_PARSE_ON_OFF(CMD_ARGV[3], run_sbbusyerror_test);
2476 if (r->test_sba_config_reg) {
2477 return r->test_sba_config_reg(target, legal_address, num_words,
2478 illegal_address, run_sbbusyerror_test);
2480 LOG_ERROR("test_sba_config_reg is not implemented for this target.");
2485 COMMAND_HANDLER(riscv_reset_delays)
2490 LOG_ERROR("Command takes at most one argument");
2491 return ERROR_COMMAND_SYNTAX_ERROR;
2495 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], wait);
2497 struct target *target = get_current_target(CMD_CTX);
2499 r->reset_delays_wait = wait;
2503 COMMAND_HANDLER(riscv_set_ir)
2505 if (CMD_ARGC != 2) {
2506 LOG_ERROR("Command takes exactly 2 arguments");
2507 return ERROR_COMMAND_SYNTAX_ERROR;
2511 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2513 if (!strcmp(CMD_ARGV[0], "idcode"))
2514 buf_set_u32(ir_idcode, 0, 32, value);
2515 else if (!strcmp(CMD_ARGV[0], "dtmcs"))
2516 buf_set_u32(ir_dtmcontrol, 0, 32, value);
2517 else if (!strcmp(CMD_ARGV[0], "dmi"))
2518 buf_set_u32(ir_dbus, 0, 32, value);
2525 COMMAND_HANDLER(riscv_resume_order)
2528 LOG_ERROR("Command takes at most one argument");
2529 return ERROR_COMMAND_SYNTAX_ERROR;
2532 if (!strcmp(CMD_ARGV[0], "normal")) {
2533 resume_order = RO_NORMAL;
2534 } else if (!strcmp(CMD_ARGV[0], "reversed")) {
2535 resume_order = RO_REVERSED;
2537 LOG_ERROR("Unsupported resume order: %s", CMD_ARGV[0]);
/* `riscv use_bscan_tunnel value [type]`: enable (value = IR width) or
 * disable (value = 0) tunneling DMI accesses through a BSCAN tunnel.
 * Optional second argument selects the tunnel type; anything other than the
 * two known types logs a notice and the default (nested TAP) behavior is
 * kept via the initializer. */
2544 COMMAND_HANDLER(riscv_use_bscan_tunnel)
2547 int tunnel_type = BSCAN_TUNNEL_NESTED_TAP;
2550 LOG_ERROR("Command takes at most two arguments");
2551 return ERROR_COMMAND_SYNTAX_ERROR;
2552 } else if (CMD_ARGC == 1) {
2553 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
2554 } else if (CMD_ARGC == 2) {
2555 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], irwidth);
2556 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], tunnel_type);
2558 if (tunnel_type == BSCAN_TUNNEL_NESTED_TAP)
2559 LOG_INFO("Nested Tap based Bscan Tunnel Selected");
2560 else if (tunnel_type == BSCAN_TUNNEL_DATA_REGISTER)
2561 LOG_INFO("Simple Register based Bscan Tunnel Selected");
2563 LOG_INFO("Invalid Tunnel type selected ! : selecting default Nested Tap Type");
/* Publish the selection in the file-scope configuration globals. */
2565 bscan_tunnel_type = tunnel_type;
2566 bscan_tunnel_ir_width = irwidth;
/* `riscv set_enable_virt2phys on|off`: toggle virtual-to-physical address
 * translation (global flag `riscv_enable_virt2phys`). */
2570 COMMAND_HANDLER(riscv_set_enable_virt2phys)
2572 if (CMD_ARGC != 1) {
2573 LOG_ERROR("Command takes exactly 1 parameter");
2574 return ERROR_COMMAND_SYNTAX_ERROR;
2576 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_enable_virt2phys);
/* `riscv set_ebreakm on|off`: control whether dcsr.ebreakm is set, i.e.
 * whether M-mode ebreak traps to the debugger (global `riscv_ebreakm`). */
2580 COMMAND_HANDLER(riscv_set_ebreakm)
2582 if (CMD_ARGC != 1) {
2583 LOG_ERROR("Command takes exactly 1 parameter");
2584 return ERROR_COMMAND_SYNTAX_ERROR;
2586 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreakm);
/* `riscv set_ebreaks on|off`: same as set_ebreakm but for S-mode
 * (dcsr.ebreaks, global `riscv_ebreaks`). */
2590 COMMAND_HANDLER(riscv_set_ebreaks)
2592 if (CMD_ARGC != 1) {
2593 LOG_ERROR("Command takes exactly 1 parameter");
2594 return ERROR_COMMAND_SYNTAX_ERROR;
2596 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaks);
/* `riscv set_ebreaku on|off`: same as set_ebreakm but for U-mode
 * (dcsr.ebreaku, global `riscv_ebreaku`). */
2600 COMMAND_HANDLER(riscv_set_ebreaku)
2602 if (CMD_ARGC != 1) {
2603 LOG_ERROR("Command takes exactly 1 parameter");
2604 return ERROR_COMMAND_SYNTAX_ERROR;
2606 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_ebreaku);
/* Registration table for the `riscv` command group: one entry per handler
 * defined above, terminated by COMMAND_REGISTRATION_DONE.  Chained into
 * riscv_command_handlers[] below. */
2610 static const struct command_registration riscv_exec_command_handlers[] = {
2612 .name = "test_compliance",
2613 .handler = riscv_test_compliance,
2615 .mode = COMMAND_EXEC,
2616 .help = "Runs a basic compliance test suite against the RISC-V Debug Spec."
2619 .name = "set_command_timeout_sec",
2620 .handler = riscv_set_command_timeout_sec,
2621 .mode = COMMAND_ANY,
2623 .help = "Set the wall-clock timeout (in seconds) for individual commands"
2626 .name = "set_reset_timeout_sec",
2627 .handler = riscv_set_reset_timeout_sec,
2628 .mode = COMMAND_ANY,
2630 .help = "Set the wall-clock timeout (in seconds) after reset is deasserted"
2633 .name = "set_prefer_sba",
2634 .handler = riscv_set_prefer_sba,
2635 .mode = COMMAND_ANY,
2637 .help = "When on, prefer to use System Bus Access to access memory. "
2638 "When off (default), prefer to use the Program Buffer to access memory."
2641 .name = "set_enable_virtual",
2642 .handler = riscv_set_enable_virtual,
2643 .mode = COMMAND_ANY,
2645 .help = "When on, memory accesses are performed on physical or virtual "
2646 "memory depending on the current system configuration. "
2647 "When off (default), all memory accessses are performed on physical memory."
2650 .name = "expose_csrs",
2651 .handler = riscv_set_expose_csrs,
2652 .mode = COMMAND_ANY,
2653 .usage = "n0[-m0][,n1[-m1]]...",
2654 .help = "Configure a list of inclusive ranges for CSRs to expose in "
2655 "addition to the standard ones. This must be executed before "
2659 .name = "expose_custom",
2660 .handler = riscv_set_expose_custom,
2661 .mode = COMMAND_ANY,
2662 .usage = "n0[-m0][,n1[-m1]]...",
2663 .help = "Configure a list of inclusive ranges for custom registers to "
2664 "expose. custom0 is accessed as abstract register number 0xc000, "
2665 "etc. This must be executed before `init`."
2668 .name = "authdata_read",
2669 .handler = riscv_authdata_read,
2671 .mode = COMMAND_ANY,
2672 .help = "Return the 32-bit value read from authdata."
2675 .name = "authdata_write",
2676 .handler = riscv_authdata_write,
2677 .mode = COMMAND_ANY,
2679 .help = "Write the 32-bit value to authdata."
2683 .handler = riscv_dmi_read,
2684 .mode = COMMAND_ANY,
2686 .help = "Perform a 32-bit DMI read at address, returning the value."
2689 .name = "dmi_write",
2690 .handler = riscv_dmi_write,
2691 .mode = COMMAND_ANY,
2692 .usage = "address value",
2693 .help = "Perform a 32-bit DMI write of value at address."
2696 .name = "test_sba_config_reg",
2697 .handler = riscv_test_sba_config_reg,
2698 .mode = COMMAND_ANY,
2699 .usage = "legal_address num_words "
2700 "illegal_address run_sbbusyerror_test[on/off]",
2701 .help = "Perform a series of tests on the SBCS register. "
2702 "Inputs are a legal, 128-byte aligned address and a number of words to "
2703 "read/write starting at that address (i.e., address range [legal address, "
2704 "legal_address+word_size*num_words) must be legally readable/writable), "
2705 "an illegal, 128-byte aligned address for error flag/handling cases, "
2706 "and whether sbbusyerror test should be run."
2709 .name = "reset_delays",
2710 .handler = riscv_reset_delays,
2711 .mode = COMMAND_ANY,
2713 .help = "OpenOCD learns how many Run-Test/Idle cycles are required "
2714 "between scans to avoid encountering the target being busy. This "
2715 "command resets those learned values after `wait` scans. It's only "
2716 "useful for testing OpenOCD itself."
2719 .name = "resume_order",
2720 .handler = riscv_resume_order,
2721 .mode = COMMAND_ANY,
2722 .usage = "normal|reversed",
2723 .help = "Choose the order that harts are resumed in when `hasel` is not "
2724 "supported. Normal order is from lowest hart index to highest. "
2725 "Reversed order is from highest hart index to lowest."
2729 .handler = riscv_set_ir,
2730 .mode = COMMAND_ANY,
2731 .usage = "[idcode|dtmcs|dmi] value",
2732 .help = "Set IR value for specified JTAG register."
2735 .name = "use_bscan_tunnel",
2736 .handler = riscv_use_bscan_tunnel,
2737 .mode = COMMAND_ANY,
2738 .usage = "value [type]",
2739 .help = "Enable or disable use of a BSCAN tunnel to reach DM. Supply "
2740 "the width of the DM transport TAP's instruction register to "
2741 "enable. Supply a value of 0 to disable. Pass A second argument "
2742 "(optional) to indicate Bscan Tunnel Type {0:(default) NESTED_TAP , "
2746 .name = "set_enable_virt2phys",
2747 .handler = riscv_set_enable_virt2phys,
2748 .mode = COMMAND_ANY,
2750 .help = "When on (default), enable translation from virtual address to "
2754 .name = "set_ebreakm",
2755 .handler = riscv_set_ebreakm,
2756 .mode = COMMAND_ANY,
2758 .help = "Control dcsr.ebreakm. When off, M-mode ebreak instructions "
2759 "don't trap to OpenOCD. Defaults to on."
2762 .name = "set_ebreaks",
2763 .handler = riscv_set_ebreaks,
2764 .mode = COMMAND_ANY,
2766 .help = "Control dcsr.ebreaks. When off, S-mode ebreak instructions "
2767 "don't trap to OpenOCD. Defaults to on."
2770 .name = "set_ebreaku",
2771 .handler = riscv_set_ebreaku,
2772 .mode = COMMAND_ANY,
2774 .help = "Control dcsr.ebreaku. When off, U-mode ebreak instructions "
2775 "don't trap to OpenOCD. Defaults to on."
2777 COMMAND_REGISTRATION_DONE
2781 * To be noted that RISC-V targets use the same semihosting commands as
2784 * The main reason is compatibility with existing tools. For example the
2785 * Eclipse OpenOCD/SEGGER J-Link/QEMU plug-ins have several widgets to
2786 * configure semihosting, which generate commands like `arm semihosting
2788 * A secondary reason is the fact that the protocol used is exactly the
2789 * one specified by ARM. If RISC-V will ever define its own semihosting
2790 * protocol, then a command like `riscv semihosting enable` will make
2791 * sense, but for now all semihosting commands are prefixed with `arm`.
2793 extern const struct command_registration semihosting_common_handlers[];
/* Top-level command registration for RISC-V targets: the `riscv` group
 * (chained to riscv_exec_command_handlers above) plus the shared ARM
 * semihosting commands (see the comment above for why they keep the `arm`
 * prefix). */
2795 const struct command_registration riscv_command_handlers[] = {
2798 .mode = COMMAND_ANY,
2799 .help = "RISC-V Command Group",
2801 .chain = riscv_exec_command_handlers
2805 .mode = COMMAND_ANY,
2806 .help = "ARM Command Group",
2808 .chain = semihosting_common_handlers
2810 COMMAND_REGISTRATION_DONE
/* Non-const wrapper around riscv_xlen(), needed because the
 * target_type.address_bits callback takes a non-const struct target *. */
2813 static unsigned riscv_xlen_nonconst(struct target *target)
2815 return riscv_xlen(target);
/* OpenOCD target driver vtable for RISC-V.  Each member points at one of the
 * riscv_* implementations in this file. */
2818 struct target_type riscv_target = {
2821 .init_target = riscv_init_target,
2822 .deinit_target = riscv_deinit_target,
2823 .examine = riscv_examine,
2825 /* poll current target status */
2826 .poll = old_or_new_riscv_poll,
2829 .resume = riscv_target_resume,
2830 .step = old_or_new_riscv_step,
2832 .assert_reset = riscv_assert_reset,
2833 .deassert_reset = riscv_deassert_reset,
2835 .read_memory = riscv_read_memory,
2836 .write_memory = riscv_write_memory,
2837 .read_phys_memory = riscv_read_phys_memory,
2838 .write_phys_memory = riscv_write_phys_memory,
2840 .checksum_memory = riscv_checksum_memory,
2843 .virt2phys = riscv_virt2phys,
2845 .get_gdb_reg_list = riscv_get_gdb_reg_list,
2846 .get_gdb_reg_list_noread = riscv_get_gdb_reg_list_noread,
2848 .add_breakpoint = riscv_add_breakpoint,
2849 .remove_breakpoint = riscv_remove_breakpoint,
2851 .add_watchpoint = riscv_add_watchpoint,
2852 .remove_watchpoint = riscv_remove_watchpoint,
2853 .hit_watchpoint = riscv_hit_watchpoint,
2855 .arch_state = riscv_arch_state,
2857 .run_algorithm = riscv_run_algorithm,
2859 .commands = riscv_command_handlers,
2861 .address_bits = riscv_xlen_nonconst,
2864 /*** RISC-V Interface ***/
/* Zero-initialize the per-target riscv_info_t and set the non-zero
 * defaults: current hart = coreid, trigger IDs = 0xff (unassigned), plus a
 * per-hart loop whose body is outside this view. */
2866 void riscv_info_init(struct target *target, riscv_info_t *r)
2868 memset(r, 0, sizeof(*r));
2870 r->registers_initialized = false;
2871 r->current_hartid = target->coreid;
2873 memset(r->trigger_unique_id, 0xff, sizeof(r->trigger_unique_id));
2875 for (size_t h = 0; h < RISCV_MAX_HARTS; ++h)
/* Resume every enabled hart, iterating in the order selected by the global
 * `resume_order` (normal: 0..n-1, reversed: n-1..0).  Harts that are not
 * halted are skipped with a debug note.  Invalidates the register cache
 * afterwards since the harts are now running. */
2879 static int riscv_resume_go_all_harts(struct target *target)
2883 /* Dummy variables to make mingw32-gcc happy. */
2887 switch (resume_order) {
2890 last = riscv_count_harts(target) - 1;
2894 first = riscv_count_harts(target) - 1;
/* first/last/step describe the iteration direction chosen above. */
2902 for (int i = first; i != last + step; i += step) {
2903 if (!riscv_hart_enabled(target, i))
2906 LOG_DEBUG("resuming hart %d", i);
2907 if (riscv_set_current_hartid(target, i) != ERROR_OK)
2909 if (riscv_is_halted(target)) {
2910 if (r->resume_go(target) != ERROR_OK)
2913 LOG_DEBUG("  hart %d requested resume, but was already resumed", i);
2917 riscv_invalidate_register_cache(target);
/* Single-step the current hart (or, in RTOS mode, the hart gdb selected via
 * r->rtos_hartid; "any thread" maps to hart 0).  The hart must be halted
 * before and after the step; both conditions are checked and logged. */
2921 int riscv_step_rtos_hart(struct target *target)
2924 int hartid = r->current_hartid;
2925 if (riscv_rtos_enabled(target)) {
2926 hartid = r->rtos_hartid;
2928 LOG_DEBUG("GDB has asked me to step \"any\" thread, so I'm stepping hart 0.");
2932 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
2934 LOG_DEBUG("stepping hart %d", hartid);
2936 if (!riscv_is_halted(target)) {
2937 LOG_ERROR("Hart isn't halted before single step!");
/* Drop any cached register values both before and after the step. */
2940 riscv_invalidate_register_cache(target);
2942 if (r->step_current_hart(target) != ERROR_OK)
2944 riscv_invalidate_register_cache(target);
2946 if (!riscv_is_halted(target)) {
2947 LOG_ERROR("Hart was not halted after single step!");
/* Return true if hart `hartid` implements ISA extension `letter` (case
 * insensitive), by testing the corresponding bit of the cached misa value. */
2953 bool riscv_supports_extension(struct target *target, int hartid, char letter)
2957 if (letter >= 'a' && letter <= 'z')
2959 else if (letter >= 'A' && letter <= 'Z')
2963 return r->misa[hartid] & (1 << num);
/* XLEN (register width in bits) of the currently selected hart. */
2966 unsigned riscv_xlen(const struct target *target)
2968 return riscv_xlen_of_hart(target, riscv_current_hartid(target));
/* XLEN of a specific hart; asserts that examine() has already discovered it
 * (xlen is -1 until then). */
2971 int riscv_xlen_of_hart(const struct target *target, int hartid)
2974 assert(r->xlen[hartid] != -1);
2975 return r->xlen[hartid];
/* True when this target runs with OpenOCD's RTOS support enabled
 * (body elided from this view). */
2978 bool riscv_rtos_enabled(const struct target *target)
/* Select `hartid` as the current hart via r->select_current_hart (a no-op
 * when the version-specific code provides no callback).  In RTOS mode on an
 * examined target the register cache is invalidated, since the cache only
 * describes one hart at a time. */
2983 int riscv_set_current_hartid(struct target *target, int hartid)
2986 if (!r->select_current_hart)
2989 int previous_hartid = riscv_current_hartid(target);
2990 r->current_hartid = hartid;
2991 assert(riscv_hart_enabled(target, hartid));
2992 LOG_DEBUG("setting hartid to %d, was %d", hartid, previous_hartid);
2993 if (r->select_current_hart(target) != ERROR_OK)
2996 /* This might get called during init, in which case we shouldn't be
2997 * setting up the register cache. */
2998 if (target_was_examined(target) && riscv_rtos_enabled(target))
2999 riscv_invalidate_register_cache(target);
/* Mark every cached register value as stale so the next access re-reads it
 * from the hart.  Loop body over reg_list is elided from this view. */
3004 void riscv_invalidate_register_cache(struct target *target)
3008 LOG_DEBUG("[%d]", target->coreid);
3009 register_cache_invalidate(target->reg_cache);
3010 for (size_t i = 0; i < target->reg_cache->num_regs; ++i) {
3011 struct reg *reg = &target->reg_cache->reg_list[i];
3015 r->registers_initialized = true;
/* Accessor for the currently selected hart id. */
3018 int riscv_current_hartid(const struct target *target)
3021 return r->current_hartid;
/* Select "all harts" in RTOS mode; -1 is the sentinel for "any/all". */
3024 void riscv_set_all_rtos_harts(struct target *target)
3027 r->rtos_hartid = -1;
/* Record which hart the RTOS layer wants subsequent operations applied to. */
3030 void riscv_set_rtos_hartid(struct target *target, int hartid)
3032 LOG_DEBUG("setting RTOS hartid %d", hartid);
3034 r->rtos_hartid = hartid;
/* Number of harts behind this target; falls back (branch target elided)
 * when no riscv_info or hart_count callback exists yet. */
3037 int riscv_count_harts(struct target *target)
3042 if (r == NULL || r->hart_count == NULL)
3044 return r->hart_count(target);
/* Whether hart `hartid` implements register `regid` (body elided from this
 * view). */
3047 bool riscv_has_register(struct target *target, int hartid, int regid)
3054 * return true iff we are guaranteed that the register will contain exactly
3055 * the value we just wrote when it's read.
3056 * If write is false:
3057 * return true iff we are guaranteed that the register will read the same
3058 * value in the future as the value we just read.
/* Decide whether a register value may be kept in the cache after a write
 * (write == true) or a read (write == false); see the contract comment
 * above this function. */
3060 static bool gdb_regno_cacheable(enum gdb_regno regno, bool write)
3062 /* GPRs, FPRs, vector registers are just normal data stores. */
3063 if (regno <= GDB_REGNO_XPR31 ||
3064 (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31) ||
3065 (regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31))
3068 /* Most CSRs won't change value on us, but we can't assume it about arbitrary
3074 case GDB_REGNO_VSTART:
3075 case GDB_REGNO_VXSAT:
3076 case GDB_REGNO_VXRM:
3077 case GDB_REGNO_VLENB:
3079 case GDB_REGNO_VTYPE:
3080 case GDB_REGNO_MISA:
3081 case GDB_REGNO_DCSR:
3082 case GDB_REGNO_DSCRATCH0:
3083 case GDB_REGNO_MSTATUS:
3084 case GDB_REGNO_MEPC:
3085 case GDB_REGNO_MCAUSE:
3086 case GDB_REGNO_SATP:
3088 * WARL registers might not contain the value we just wrote, but
3089 * these ones won't spontaneously change their value either. *
3093 case GDB_REGNO_TSELECT:	/* I think this should be above, but then it doesn't work. */
3094 case GDB_REGNO_TDATA1:	/* Changes value when tselect is changed. */
3095 case GDB_REGNO_TDATA2:	/* Changes value when tselect is changed. */
3102 * This function is called when the debug user wants to change the value of a
3103 * register. The new value may be cached, and may not be written until the hart
/* Write register `r` on the current hart (convenience wrapper; see the
 * caching note in the comment above). */
3105 int riscv_set_register(struct target *target, enum gdb_regno r, riscv_reg_t v)
3107 return riscv_set_register_on_hart(target, riscv_current_hartid(target), r, v);
/* Write `value` to register `regid` on `hartid`.  Writes of 0 to x16..x31
 * on RV32E harts are silently accepted (gdb hack — those registers don't
 * exist there).  On success the cache entry is updated and marked valid
 * according to gdb_regno_cacheable(). */
3110 int riscv_set_register_on_hart(struct target *target, int hartid,
3111 enum gdb_regno regid, uint64_t value)
3114 LOG_DEBUG("{%d} %s <- %" PRIx64, hartid, gdb_regno_name(regid), value);
3115 assert(r->set_register);
3117 /* TODO: Hack to deal with gdb that thinks these registers still exist. */
3118 if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 && value == 0 &&
3119 riscv_supports_extension(target, hartid, 'E'))
3122 struct reg *reg = &target->reg_cache->reg_list[regid];
3123 buf_set_u64(reg->value, 0, reg->size, value);
3125 int result = r->set_register(target, hartid, regid, value);
3126 if (result == ERROR_OK)
3127 reg->valid = gdb_regno_cacheable(regid, true);
3130 LOG_DEBUG("[%s]{%d} wrote 0x%" PRIx64 " to %s valid=%d",
3131 target_name(target), hartid, value, reg->name, reg->valid);
/* Read register `r` from the current hart into *value (wrapper). */
3135 int riscv_get_register(struct target *target, riscv_reg_t *value,
3138 return riscv_get_register_on_hart(target, value,
3139 riscv_current_hartid(target), r);
/* Read register `regid` of `hartid` into *value.  Serves the value from the
 * cache when it is valid and the hart is current; otherwise goes through
 * r->get_register and re-marks cache validity via gdb_regno_cacheable().
 * x16..x31 on RV32E harts are faked (gdb hack; the faked value is on an
 * elided line). */
3142 int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
3143 int hartid, enum gdb_regno regid)
3147 struct reg *reg = &target->reg_cache->reg_list[regid];
3149 LOG_DEBUG("[%s]{%d} %s does not exist.",
3150 target_name(target), hartid, gdb_regno_name(regid));
3154 if (reg && reg->valid && hartid == riscv_current_hartid(target)) {
3155 *value = buf_get_u64(reg->value, 0, reg->size);
3156 LOG_DEBUG("{%d} %s: %" PRIx64 " (cached)", hartid,
3157 gdb_regno_name(regid), *value);
3161 /* TODO: Hack to deal with gdb that thinks these registers still exist. */
3162 if (regid > GDB_REGNO_XPR15 && regid <= GDB_REGNO_XPR31 &&
3163 riscv_supports_extension(target, hartid, 'E')) {
3168 int result = r->get_register(target, value, hartid, regid);
3170 if (result == ERROR_OK)
3171 reg->valid = gdb_regno_cacheable(regid, false);
3173 LOG_DEBUG("{%d} %s: %" PRIx64, hartid, gdb_regno_name(regid), *value);
/* True when the current hart is halted (delegates to the version-specific
 * is_halted callback, which must be set). */
3177 bool riscv_is_halted(struct target *target)
3180 assert(r->is_halted);
3181 return r->is_halted(target);
/* Why is hart `hartid` halted?  Selects the hart first; returns
 * RISCV_HALT_ERROR / RISCV_HALT_UNKNOWN on failure, otherwise the
 * version-specific halt_reason. */
3184 enum riscv_halt_reason riscv_halt_reason(struct target *target, int hartid)
3187 if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
3188 return RISCV_HALT_ERROR;
3189 if (!riscv_is_halted(target)) {
3190 LOG_ERROR("Hart is not halted!");
3191 return RISCV_HALT_UNKNOWN;
3193 return r->halt_reason(target);
/* Debug buffer size (in entries) of the currently selected hart. */
3196 size_t riscv_debug_buffer_size(struct target *target)
3199 return r->debug_buffer_size[riscv_current_hartid(target)];
/* Store instruction `insn` at debug-buffer slot `index` (thin delegate). */
3202 int riscv_write_debug_buffer(struct target *target, int index, riscv_insn_t insn)
3205 r->write_debug_buffer(target, index, insn);
/* Read back the instruction at debug-buffer slot `index` (thin delegate). */
3209 riscv_insn_t riscv_read_debug_buffer(struct target *target, int index)
3212 return r->read_debug_buffer(target, index);
/* Run the program currently in the debug buffer (thin delegate). */
3215 int riscv_execute_debug_buffer(struct target *target)
3218 return r->execute_debug_buffer(target);
/* Serialize a DMI write of `d` to address `a` into `buf` (thin delegate). */
3221 void riscv_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
3224 r->fill_dmi_write_u64(target, buf, a, d);
/* Serialize a DMI read of address `a` into `buf` (thin delegate). */
3227 void riscv_fill_dmi_read_u64(struct target *target, char *buf, int a)
3230 r->fill_dmi_read_u64(target, buf, a);
/* Serialize a DMI no-op into `buf` (thin delegate). */
3233 void riscv_fill_dmi_nop_u64(struct target *target, char *buf)
3236 r->fill_dmi_nop_u64(target, buf);
/* Bit width of a serialized DMI write for this target (thin delegate). */
3239 int riscv_dmi_write_u64_bits(struct target *target)
3242 return r->dmi_write_u64_bits(target);
/* Is `hartid` one of the harts this target controls?  In RTOS mode every
 * hart below the count is enabled; otherwise only the hart matching
 * coreid. */
3245 bool riscv_hart_enabled(struct target *target, int hartid)
3247 /* FIXME: Add a hart mask to the RTOS. */
3248 if (riscv_rtos_enabled(target))
3249 return hartid < riscv_count_harts(target);
3251 return hartid == target->coreid;
3255 * Count triggers, and initialize trigger_count for each hart.
3256 * trigger_count is initialized even if this function fails to discover
3258 * Disable any hardware triggers that have dmode set. We can't have set them
3259 * ourselves. Maybe they're left over from some killed debug session.
/* Discover how many hardware triggers each enabled hart has (see the
 * contract comment above).  Probes by writing successive values to tselect
 * and reading them back; the count for a hart is the first index that fails
 * to stick.  Runs at most once per target (triggers_enumerated latch).
 * Restores the original tselect when done. */
3261 int riscv_enumerate_triggers(struct target *target)
3265 if (r->triggers_enumerated)
3268 r->triggers_enumerated = true;	/* At the very least we tried. */
3270 for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
3271 if (!riscv_hart_enabled(target, hartid))
/* Save tselect so it can be restored after probing. */
3274 riscv_reg_t tselect;
3275 int result = riscv_get_register_on_hart(target, &tselect, hartid,
3277 if (result != ERROR_OK)
3280 for (unsigned t = 0; t < RISCV_MAX_TRIGGERS; ++t) {
3281 r->trigger_count[hartid] = t;
3283 /* If we can't write tselect, then this hart does not support triggers. */
3284 if (riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, t) != ERROR_OK)
3286 uint64_t tselect_rb;
3287 result = riscv_get_register_on_hart(target, &tselect_rb, hartid,
3289 if (result != ERROR_OK)
3291 /* Mask off the top bit, which is used as tdrmode in old
3292 * implementations. */
3293 tselect_rb &= ~(1ULL << (riscv_xlen(target)-1));
3294 if (tselect_rb != t)
3297 result = riscv_get_register_on_hart(target, &tdata1, hartid,
3299 if (result != ERROR_OK)
3302 int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
3307 /* On these older cores we don't support software using
3309 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
/* Clear dmode triggers left over from a previous debug session. */
3312 if (tdata1 & MCONTROL_DMODE(riscv_xlen(target)))
3313 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
3318 riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
3320 LOG_INFO("[%d] Found %d triggers", hartid, r->trigger_count[hartid]);
/* Human-readable name for a gdb register number; well-known registers get
 * named cases, everything else is formatted into a static buffer.
 * NOTE(review): the fallback path returns a pointer into `static char
 * buf[32]`, so the result is overwritten by the next call — not
 * reentrant/thread-safe. */
3326 const char *gdb_regno_name(enum gdb_regno regno)
3328 static char buf[32];
3331 case GDB_REGNO_ZERO:
3397 case GDB_REGNO_FPR0:
3399 case GDB_REGNO_FPR31:
3401 case GDB_REGNO_CSR0:
3403 case GDB_REGNO_TSELECT:
3405 case GDB_REGNO_TDATA1:
3407 case GDB_REGNO_TDATA2:
3409 case GDB_REGNO_MISA:
3413 case GDB_REGNO_DCSR:
3415 case GDB_REGNO_DSCRATCH0:
3417 case GDB_REGNO_MSTATUS:
3419 case GDB_REGNO_MEPC:
3421 case GDB_REGNO_MCAUSE:
3423 case GDB_REGNO_PRIV:
3425 case GDB_REGNO_SATP:
3427 case GDB_REGNO_VTYPE:
3496 if (regno <= GDB_REGNO_XPR31)
3497 sprintf(buf, "x%d", regno - GDB_REGNO_ZERO);
3498 else if (regno >= GDB_REGNO_CSR0 && regno <= GDB_REGNO_CSR4095)
3499 sprintf(buf, "csr%d", regno - GDB_REGNO_CSR0);
3500 else if (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31)
3501 sprintf(buf, "f%d", regno - GDB_REGNO_FPR0);
3503 sprintf(buf, "gdb_regno_%d", regno);
/* struct reg_arch_type .get callback: fetch a register value into
 * reg->value.  Vector registers go through the buffer-based
 * get_register_buf (error if the target doesn't provide it); everything
 * else through riscv_get_register, with cache validity set by
 * gdb_regno_cacheable(). */
3508 static int register_get(struct reg *reg)
3510 riscv_reg_info_t *reg_info = reg->arch_info;
3511 struct target *target = reg_info->target;
3514 if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
3515 if (!r->get_register_buf) {
3516 LOG_ERROR("Reading register %s not supported on this RISC-V target.",
3517 gdb_regno_name(reg->number));
3521 if (r->get_register_buf(target, reg->value, reg->number) != ERROR_OK)
3525 int result = riscv_get_register(target, &value, reg->number);
3526 if (result != ERROR_OK)
3528 buf_set_u64(reg->value, 0, reg->size, value);
3530 reg->valid = gdb_regno_cacheable(reg->number, false);
/* buf_to_hex_str allocates; the matching free is on an elided line. */
3531 char *str = buf_to_hex_str(reg->value, reg->size);
3532 LOG_DEBUG("[%d]{%d} read 0x%s from %s (valid=%d)", target->coreid,
3533 riscv_current_hartid(target), str, reg->name, reg->valid);
/* struct reg_arch_type .set callback: write `buf` to the register.  Copies
 * the bytes into the cache first; TDATA1/TDATA2 writes flag manual
 * hardware-breakpoint use and force trigger enumeration; vector registers
 * use set_register_buf, everything else riscv_set_register. */
3538 static int register_set(struct reg *reg, uint8_t *buf)
3540 riscv_reg_info_t *reg_info = reg->arch_info;
3541 struct target *target = reg_info->target;
3544 char *str = buf_to_hex_str(buf, reg->size);
3545 LOG_DEBUG("[%d]{%d} write 0x%s to %s (valid=%d)", target->coreid,
3546 riscv_current_hartid(target), str, reg->name, reg->valid);
3549 memcpy(reg->value, buf, DIV_ROUND_UP(reg->size, 8));
3550 reg->valid = gdb_regno_cacheable(reg->number, true);
3552 if (reg->number == GDB_REGNO_TDATA1 ||
3553 reg->number == GDB_REGNO_TDATA2) {
3554 r->manual_hwbp_set = true;
3555 /* When enumerating triggers, we clear any triggers with DMODE set,
3556 * assuming they were left over from a previous debug session. So make
3557 * sure that is done before a user might be setting their own triggers.
3559 if (riscv_enumerate_triggers(target) != ERROR_OK)
3563 if (reg->number >= GDB_REGNO_V0 && reg->number <= GDB_REGNO_V31) {
3564 if (!r->set_register_buf) {
3565 LOG_ERROR("Writing register %s not supported on this RISC-V target.",
3566 gdb_regno_name(reg->number));
3570 if (r->set_register_buf(target, reg->number, reg->value) != ERROR_OK)
3573 uint64_t value = buf_get_u64(buf, 0, reg->size);
3574 if (riscv_set_register(target, reg->number, value) != ERROR_OK)
/* get/set vtable shared by every register in the cache built by
 * riscv_init_registers() (the .set member is on an elided line). */
3581 static struct reg_arch_type riscv_reg_arch_type = {
3582 .get = register_get,
/* qsort comparator ordering struct csr_info entries by CSR number. */
3591 static int cmp_csr_info(const void *p1, const void *p2)
3593 return (int) (((struct csr_info *)p1)->number) - (int) (((struct csr_info *)p2)->number);
3596 int riscv_init_registers(struct target *target)
3600 riscv_free_registers(target);
3602 target->reg_cache = calloc(1, sizeof(*target->reg_cache));
3603 if (!target->reg_cache)
3605 target->reg_cache->name = "RISC-V Registers";
3606 target->reg_cache->num_regs = GDB_REGNO_COUNT;
3608 if (expose_custom) {
3609 for (unsigned i = 0; expose_custom[i].low <= expose_custom[i].high; i++) {
3610 for (unsigned number = expose_custom[i].low;
3611 number <= expose_custom[i].high;
3613 target->reg_cache->num_regs++;
3617 LOG_DEBUG("create register cache for %d registers",
3618 target->reg_cache->num_regs);
3620 target->reg_cache->reg_list =
3621 calloc(target->reg_cache->num_regs, sizeof(struct reg));
3622 if (!target->reg_cache->reg_list)
3625 const unsigned int max_reg_name_len = 12;
3626 free(info->reg_names);
3628 calloc(target->reg_cache->num_regs, max_reg_name_len);
3629 if (!info->reg_names)
3631 char *reg_name = info->reg_names;
3633 int hartid = riscv_current_hartid(target);
3635 static struct reg_feature feature_cpu = {
3636 .name = "org.gnu.gdb.riscv.cpu"
3638 static struct reg_feature feature_fpu = {
3639 .name = "org.gnu.gdb.riscv.fpu"
3641 static struct reg_feature feature_csr = {
3642 .name = "org.gnu.gdb.riscv.csr"
3644 static struct reg_feature feature_vector = {
3645 .name = "org.gnu.gdb.riscv.vector"
3647 static struct reg_feature feature_virtual = {
3648 .name = "org.gnu.gdb.riscv.virtual"
3650 static struct reg_feature feature_custom = {
3651 .name = "org.gnu.gdb.riscv.custom"
3654 /* These types are built into gdb. */
3655 static struct reg_data_type type_ieee_single = { .type = REG_TYPE_IEEE_SINGLE, .id = "ieee_single" };
3656 static struct reg_data_type type_ieee_double = { .type = REG_TYPE_IEEE_DOUBLE, .id = "ieee_double" };
3657 static struct reg_data_type_union_field single_double_fields[] = {
3658 {"float", &type_ieee_single, single_double_fields + 1},
3659 {"double", &type_ieee_double, NULL},
3661 static struct reg_data_type_union single_double_union = {
3662 .fields = single_double_fields
3664 static struct reg_data_type type_ieee_single_double = {
3665 .type = REG_TYPE_ARCH_DEFINED,
3667 .type_class = REG_TYPE_CLASS_UNION,
3668 .reg_type_union = &single_double_union
3670 static struct reg_data_type type_uint8 = { .type = REG_TYPE_UINT8, .id = "uint8" };
3671 static struct reg_data_type type_uint16 = { .type = REG_TYPE_UINT16, .id = "uint16" };
3672 static struct reg_data_type type_uint32 = { .type = REG_TYPE_UINT32, .id = "uint32" };
3673 static struct reg_data_type type_uint64 = { .type = REG_TYPE_UINT64, .id = "uint64" };
3674 static struct reg_data_type type_uint128 = { .type = REG_TYPE_UINT128, .id = "uint128" };
3676 /* This is roughly the XML we want:
3677 * <vector id="bytes" type="uint8" count="16"/>
3678 * <vector id="shorts" type="uint16" count="8"/>
3679 * <vector id="words" type="uint32" count="4"/>
3680 * <vector id="longs" type="uint64" count="2"/>
3681 * <vector id="quads" type="uint128" count="1"/>
3682 * <union id="riscv_vector_type">
3683 * <field name="b" type="bytes"/>
3684 * <field name="s" type="shorts"/>
3685 * <field name="w" type="words"/>
3686 * <field name="l" type="longs"/>
3687 * <field name="q" type="quads"/>
3691 info->vector_uint8.type = &type_uint8;
3692 info->vector_uint8.count = info->vlenb[hartid];
3693 info->type_uint8_vector.type = REG_TYPE_ARCH_DEFINED;
3694 info->type_uint8_vector.id = "bytes";
3695 info->type_uint8_vector.type_class = REG_TYPE_CLASS_VECTOR;
3696 info->type_uint8_vector.reg_type_vector = &info->vector_uint8;
3698 info->vector_uint16.type = &type_uint16;
3699 info->vector_uint16.count = info->vlenb[hartid] / 2;
3700 info->type_uint16_vector.type = REG_TYPE_ARCH_DEFINED;
3701 info->type_uint16_vector.id = "shorts";
3702 info->type_uint16_vector.type_class = REG_TYPE_CLASS_VECTOR;
3703 info->type_uint16_vector.reg_type_vector = &info->vector_uint16;
3705 info->vector_uint32.type = &type_uint32;
3706 info->vector_uint32.count = info->vlenb[hartid] / 4;
3707 info->type_uint32_vector.type = REG_TYPE_ARCH_DEFINED;
3708 info->type_uint32_vector.id = "words";
3709 info->type_uint32_vector.type_class = REG_TYPE_CLASS_VECTOR;
3710 info->type_uint32_vector.reg_type_vector = &info->vector_uint32;
3712 info->vector_uint64.type = &type_uint64;
3713 info->vector_uint64.count = info->vlenb[hartid] / 8;
3714 info->type_uint64_vector.type = REG_TYPE_ARCH_DEFINED;
3715 info->type_uint64_vector.id = "longs";
3716 info->type_uint64_vector.type_class = REG_TYPE_CLASS_VECTOR;
3717 info->type_uint64_vector.reg_type_vector = &info->vector_uint64;
3719 info->vector_uint128.type = &type_uint128;
3720 info->vector_uint128.count = info->vlenb[hartid] / 16;
3721 info->type_uint128_vector.type = REG_TYPE_ARCH_DEFINED;
3722 info->type_uint128_vector.id = "quads";
3723 info->type_uint128_vector.type_class = REG_TYPE_CLASS_VECTOR;
3724 info->type_uint128_vector.reg_type_vector = &info->vector_uint128;
3726 info->vector_fields[0].name = "b";
3727 info->vector_fields[0].type = &info->type_uint8_vector;
3728 if (info->vlenb[hartid] >= 2) {
3729 info->vector_fields[0].next = info->vector_fields + 1;
3730 info->vector_fields[1].name = "s";
3731 info->vector_fields[1].type = &info->type_uint16_vector;
3733 info->vector_fields[0].next = NULL;
3735 if (info->vlenb[hartid] >= 4) {
3736 info->vector_fields[1].next = info->vector_fields + 2;
3737 info->vector_fields[2].name = "w";
3738 info->vector_fields[2].type = &info->type_uint32_vector;
3740 info->vector_fields[1].next = NULL;
3742 if (info->vlenb[hartid] >= 8) {
3743 info->vector_fields[2].next = info->vector_fields + 3;
3744 info->vector_fields[3].name = "l";
3745 info->vector_fields[3].type = &info->type_uint64_vector;
3747 info->vector_fields[2].next = NULL;
3749 if (info->vlenb[hartid] >= 16) {
3750 info->vector_fields[3].next = info->vector_fields + 4;
3751 info->vector_fields[4].name = "q";
3752 info->vector_fields[4].type = &info->type_uint128_vector;
3754 info->vector_fields[3].next = NULL;
3756 info->vector_fields[4].next = NULL;
3758 info->vector_union.fields = info->vector_fields;
3760 info->type_vector.type = REG_TYPE_ARCH_DEFINED;
3761 info->type_vector.id = "riscv_vector";
3762 info->type_vector.type_class = REG_TYPE_CLASS_UNION;
3763 info->type_vector.reg_type_union = &info->vector_union;
3765 struct csr_info csr_info[] = {
3766 #define DECLARE_CSR(name, number) { number, #name },
3767 #include "encoding.h"
3770 /* encoding.h does not contain the registers in sorted order. */
3771 qsort(csr_info, DIM(csr_info), sizeof(*csr_info), cmp_csr_info);
3772 unsigned csr_info_index = 0;
3774 unsigned custom_range_index = 0;
3775 int custom_within_range = 0;
3777 riscv_reg_info_t *shared_reg_info = calloc(1, sizeof(riscv_reg_info_t));
3778 if (!shared_reg_info)
3780 shared_reg_info->target = target;
3782 /* When gdb requests register N, gdb_get_register_packet() assumes that this
3783 * is register at index N in reg_list. So if there are certain registers
3784 * that don't exist, we need to leave holes in the list (or renumber, but
3785 * it would be nice not to have yet another set of numbers to translate
3787 for (uint32_t number = 0; number < target->reg_cache->num_regs; number++) {
3788 struct reg *r = &target->reg_cache->reg_list[number];
3792 r->type = &riscv_reg_arch_type;
3793 r->arch_info = shared_reg_info;
3795 r->size = riscv_xlen(target);
3796 /* r->size is set in riscv_invalidate_register_cache, maybe because the
3797 * target is in theory allowed to change XLEN on us. But I expect a lot
3798 * of other things to break in that case as well. */
/* Fragment of the register-cache setup routine: fills in one struct reg (r)
 * for the GDB register number `number` of hart `hartid` — name, existence,
 * group, feature, data type.  This excerpt is heavily sampled (intervening
 * original lines are missing), so NOTE(review) comments below flag
 * assumptions that must be confirmed against the full file. */
/* --- General-purpose registers x0..x31 --- */
3799 if (number <= GDB_REGNO_XPR31) {
/* With the embedded (E) base ISA only x0..x15 exist; otherwise all 32 do. */
3800 r->exist = number <= GDB_REGNO_XPR15 ||
3801 !riscv_supports_extension(target, hartid, 'E');
3802 /* TODO: For now we fake that all GPRs exist because otherwise gdb
3805 r->caller_save = true;
/* NOTE(review): the switch header that dispatches on `number` to assign the
 * per-GPR ABI names is on a hidden line above this case label — confirm. */
3807 case GDB_REGNO_ZERO:
3904 r->group = "general";
3905 r->feature = &feature_cpu;
/* --- Program counter --- */
3906 } else if (number == GDB_REGNO_PC) {
3907 r->caller_save = true;
3908 sprintf(reg_name, "pc");
3909 r->group = "general";
3910 r->feature = &feature_cpu;
/* --- Floating-point registers f0..f31 --- */
3911 } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
3912 r->caller_save = true;
/* Choose the GDB data type from the FP extensions: with D the register is
 * 64-bit (a single/double union when F is also present); with only F it is
 * a 32-bit single. */
3913 if (riscv_supports_extension(target, hartid, 'D')) {
3915 if (riscv_supports_extension(target, hartid, 'F'))
3916 r->reg_data_type = &type_ieee_single_double;
3918 r->reg_data_type = &type_ieee_double;
3919 } else if (riscv_supports_extension(target, hartid, 'F')) {
3920 r->reg_data_type = &type_ieee_single;
/* NOTE(review): hidden lines here presumably mark FPRs nonexistent without
 * F/D and name f0..f31 via a switch; only stray case labels are visible. */
4004 case GDB_REGNO_FS10:
4007 case GDB_REGNO_FS11:
4016 case GDB_REGNO_FT10:
4019 case GDB_REGNO_FT11:
4024 r->feature = &feature_fpu;
/* --- CSRs: GDB register numbers map 1:1 onto csr0..csr4095 --- */
4025 } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
4027 r->feature = &feature_csr;
4028 unsigned csr_number = number - GDB_REGNO_CSR0;
/* Linear scan through the csr_info[] name table (assumed sorted by CSR
 * number); the `DIM(csr_info) - 1` bound stops the walk at the last entry
 * so the lookup below stays in range even for unknown CSRs. */
4030 while (csr_info[csr_info_index].number < csr_number &&
4031 csr_info_index < DIM(csr_info) - 1) {
4034 if (csr_info[csr_info_index].number == csr_number) {
4035 r->name = csr_info[csr_info_index].name;
/* CSRs not in the table get a generic "csr%d" name. */
4037 sprintf(reg_name, "csr%d", csr_number);
4038 /* Assume unnamed registers don't exist, unless we have some
4039 * configuration that tells us otherwise. That's important
4040 * because eg. Eclipse crashes if a target has too many
4041 * registers, and apparently has no way of only showing a
4042 * subset of registers in any case. */
/* Per-CSR existence rules keyed on the extensions the hart reports. */
4046 switch (csr_number) {
/* FP control CSRs (hidden case labels, presumably fflags/frm/fcsr) require
 * the F extension — confirm labels in the full file. */
4050 r->exist = riscv_supports_extension(target, hartid, 'F');
4052 r->feature = &feature_fpu;
/* Supervisor-mode CSRs require the S extension. */
4058 case CSR_SCOUNTEREN:
4064 r->exist = riscv_supports_extension(target, hartid, 'S');
4068 /* "In systems with only M-mode, or with both M-mode and
4069 * U-mode but without U-mode trap support, the medeleg and
4070 * mideleg registers should not exist." */
4071 r->exist = riscv_supports_extension(target, hartid, 'S') ||
4072 riscv_supports_extension(target, hartid, 'N');
/* The *H "high half" counter CSRs only exist on RV32 (see the
 * riscv_xlen() == 32 test after the labels). */
4080 case CSR_HPMCOUNTER3H:
4081 case CSR_HPMCOUNTER4H:
4082 case CSR_HPMCOUNTER5H:
4083 case CSR_HPMCOUNTER6H:
4084 case CSR_HPMCOUNTER7H:
4085 case CSR_HPMCOUNTER8H:
4086 case CSR_HPMCOUNTER9H:
4087 case CSR_HPMCOUNTER10H:
4088 case CSR_HPMCOUNTER11H:
4089 case CSR_HPMCOUNTER12H:
4090 case CSR_HPMCOUNTER13H:
4091 case CSR_HPMCOUNTER14H:
4092 case CSR_HPMCOUNTER15H:
4093 case CSR_HPMCOUNTER16H:
4094 case CSR_HPMCOUNTER17H:
4095 case CSR_HPMCOUNTER18H:
4096 case CSR_HPMCOUNTER19H:
4097 case CSR_HPMCOUNTER20H:
4098 case CSR_HPMCOUNTER21H:
4099 case CSR_HPMCOUNTER22H:
4100 case CSR_HPMCOUNTER23H:
4101 case CSR_HPMCOUNTER24H:
4102 case CSR_HPMCOUNTER25H:
4103 case CSR_HPMCOUNTER26H:
4104 case CSR_HPMCOUNTER27H:
4105 case CSR_HPMCOUNTER28H:
4106 case CSR_HPMCOUNTER29H:
4107 case CSR_HPMCOUNTER30H:
4108 case CSR_HPMCOUNTER31H:
4111 case CSR_MHPMCOUNTER3H:
4112 case CSR_MHPMCOUNTER4H:
4113 case CSR_MHPMCOUNTER5H:
4114 case CSR_MHPMCOUNTER6H:
4115 case CSR_MHPMCOUNTER7H:
4116 case CSR_MHPMCOUNTER8H:
4117 case CSR_MHPMCOUNTER9H:
4118 case CSR_MHPMCOUNTER10H:
4119 case CSR_MHPMCOUNTER11H:
4120 case CSR_MHPMCOUNTER12H:
4121 case CSR_MHPMCOUNTER13H:
4122 case CSR_MHPMCOUNTER14H:
4123 case CSR_MHPMCOUNTER15H:
4124 case CSR_MHPMCOUNTER16H:
4125 case CSR_MHPMCOUNTER17H:
4126 case CSR_MHPMCOUNTER18H:
4127 case CSR_MHPMCOUNTER19H:
4128 case CSR_MHPMCOUNTER20H:
4129 case CSR_MHPMCOUNTER21H:
4130 case CSR_MHPMCOUNTER22H:
4131 case CSR_MHPMCOUNTER23H:
4132 case CSR_MHPMCOUNTER24H:
4133 case CSR_MHPMCOUNTER25H:
4134 case CSR_MHPMCOUNTER26H:
4135 case CSR_MHPMCOUNTER27H:
4136 case CSR_MHPMCOUNTER28H:
4137 case CSR_MHPMCOUNTER29H:
4138 case CSR_MHPMCOUNTER30H:
4139 case CSR_MHPMCOUNTER31H:
4140 r->exist = riscv_xlen(target) == 32;
/* Vector-extension CSRs (hidden case labels above) require V. */
4149 r->exist = riscv_supports_extension(target, hartid, 'V');
/* User-configured expose_csr ranges force otherwise-hidden CSRs to be
 * reported to GDB anyway.  The loop terminates at the first range whose
 * low > high (sentinel entry). */
4153 if (!r->exist && expose_csr) {
4154 for (unsigned i = 0; expose_csr[i].low <= expose_csr[i].high; i++) {
4155 if (csr_number >= expose_csr[i].low && csr_number <= expose_csr[i].high) {
4156 LOG_INFO("Exposing additional CSR %d", csr_number);
/* --- Virtual "priv" register (debugger-synthesized privilege level) --- */
4163 } else if (number == GDB_REGNO_PRIV) {
4164 sprintf(reg_name, "priv");
4165 r->group = "general";
4166 r->feature = &feature_virtual;
/* --- Vector registers v0..v31 --- */
4169 } else if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31) {
4170 r->caller_save = false;
/* Only present when V is supported and vlenb was successfully read. */
4171 r->exist = riscv_supports_extension(target, hartid, 'V') && info->vlenb[hartid];
/* vlenb is the vector register length in bytes; reg size is in bits. */
4172 r->size = info->vlenb[hartid] * 8;
4173 sprintf(reg_name, "v%d", number - GDB_REGNO_V0);
4174 r->group = "vector";
4175 r->feature = &feature_vector;
4176 r->reg_data_type = &info->type_vector;
/* --- User-requested custom registers, appended after the standard set --- */
4178 } else if (number >= GDB_REGNO_COUNT) {
4179 /* Custom registers. */
4180 assert(expose_custom);
4182 range_t *range = &expose_custom[custom_range_index];
4183 assert(range->low <= range->high);
4184 unsigned custom_number = range->low + custom_within_range;
4186 r->group = "custom";
4187 r->feature = &feature_custom;
/* Each custom register carries its own arch_info so the register access
 * callbacks can recover the target and the custom register number.
 * NOTE(review): the calloc() result is dereferenced below; any allocation-
 * failure check would be on a hidden line — confirm in the full file. */
4188 r->arch_info = calloc(1, sizeof(riscv_reg_info_t));
4191 ((riscv_reg_info_t *) r->arch_info)->target = target;
4192 ((riscv_reg_info_t *) r->arch_info)->custom_number = custom_number;
4193 sprintf(reg_name, "custom%d", custom_number);
/* Walk through the current expose_custom range; once exhausted, restart the
 * offset and move to the next range. */
4195 custom_within_range++;
4196 if (custom_within_range > range->high - range->low) {
4197 custom_within_range = 0;
4198 custom_range_index++;
/* Advance the shared name arena past this register's NUL-terminated name;
 * the assert guards against overrunning the preallocated reg_names buffer. */
4204 reg_name += strlen(reg_name) + 1;
4205 assert(reg_name < info->reg_names + target->reg_cache->num_regs *
/* Point the cache entry's value storage at this register's slot. */
4207 r->value = info->reg_cache_values[number];
/* Queue a scan of `field` through a boundary-scan (BSCAN) tunnel: select the
 * USER4 instruction register, then wrap the payload in the four-field tunnel
 * framing (1-bit start marker, 7-bit payload width, payload itself, 3-bit
 * tail).  Nothing executes here — the scans are only queued; results appear
 * in field->in_value after the caller runs the JTAG queue, and must then be
 * right-shifted by one bit (see the one-TCK-skew comments below). */
4214 void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
4215 riscv_bscan_tunneled_scan_context_t *ctxt)
/* Route subsequent DR scans through the tunnel's data register. */
4217 jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
/* Clear all four scan fields before building the framing. */
4219 memset(ctxt->tunneled_dr, 0, sizeof(ctxt->tunneled_dr));
4220 if (bscan_tunnel_type == BSCAN_TUNNEL_DATA_REGISTER) {
/* Data-register tunnel: the framing is built from index 3 down to 0
 * (mirror order of the nested-TAP branch below). */
4221 ctxt->tunneled_dr[3].num_bits = 1;
4222 ctxt->tunneled_dr[3].out_value = bscan_one;
4223 ctxt->tunneled_dr[2].num_bits = 7;
/* The width byte tells the tunnel how many payload bits follow.
 * NOTE(review): this takes the address of tunneled_dr_width as a 7-bit
 * out_value, so it is assumed to be a single byte — confirm the typedef. */
4224 ctxt->tunneled_dr_width = field->num_bits;
4225 ctxt->tunneled_dr[2].out_value = &ctxt->tunneled_dr_width;
4226 /* for BSCAN tunnel, there is a one-TCK skew between shift in and shift out, so
4227 scanning num_bits + 1, and then will right shift the input field after executing the queues */
4229 ctxt->tunneled_dr[1].num_bits = field->num_bits + 1;
4230 ctxt->tunneled_dr[1].out_value = field->out_value;
4231 ctxt->tunneled_dr[1].in_value = field->in_value;
4233 ctxt->tunneled_dr[0].num_bits = 3;
4234 ctxt->tunneled_dr[0].out_value = bscan_zero;
4236 /* BSCAN_TUNNEL_NESTED_TAP */
/* Nested-TAP tunnel: identical framing, opposite field order (0 up to 3). */
4237 ctxt->tunneled_dr[0].num_bits = 1;
4238 ctxt->tunneled_dr[0].out_value = bscan_one;
4239 ctxt->tunneled_dr[1].num_bits = 7;
4240 ctxt->tunneled_dr_width = field->num_bits;
4241 ctxt->tunneled_dr[1].out_value = &ctxt->tunneled_dr_width;
4242 /* for BSCAN tunnel, there is a one-TCK skew between shift in and shift out, so
4243 scanning num_bits + 1, and then will right shift the input field after executing the queues */
4244 ctxt->tunneled_dr[2].num_bits = field->num_bits + 1;
4245 ctxt->tunneled_dr[2].out_value = field->out_value;
4246 ctxt->tunneled_dr[2].in_value = field->in_value;
4247 ctxt->tunneled_dr[3].num_bits = 3;
4248 ctxt->tunneled_dr[3].out_value = bscan_zero;
/* Queue the framed DR scan; execution is deferred to the JTAG queue flush. */
4250 jtag_add_dr_scan(target->tap, ARRAY_SIZE(ctxt->tunneled_dr), ctxt->tunneled_dr, TAP_IDLE);