/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (c) 2019 Google, LLC.
 * Author: Moritz Fischer <moritzf@google.com>
 */
#include <assert.h>
#include <fcntl.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <linux/pci.h>

#include <jtag/interface.h>
#include <jtag/swd.h>
#include <jtag/commands.h>
#include <helper/replacements.h>
#include <helper/bits.h>
/* PCI_CFG_SPACE_EXP_SIZE is available only from kernel v4.10 headers;
 * provide a fallback for older kernels. (Fix: the #ifndef was left
 * unterminated — add the matching #endif.) */
#ifndef PCI_CFG_SPACE_EXP_SIZE
#define PCI_CFG_SPACE_EXP_SIZE 4096
#endif

/* Offset of the first PCIe extended capability in config space */
#define PCIE_EXT_CAP_LST 0x100

/* Register offsets within the Xilinx XVC vendor-specific capability */
#define XLNX_XVC_EXT_CAP 0x00	/* extended capability header */
#define XLNX_XVC_VSEC_HDR 0x04	/* vendor-specific header */
#define XLNX_XVC_LEN_REG 0x0C	/* shift length (bits) */
#define XLNX_XVC_TMS_REG 0x10	/* TMS bit vector */
#define XLNX_XVC_TDx_REG 0x14	/* TDI on write / TDO on read */

#define XLNX_XVC_CAP_SIZE 0x20	/* expected VSEC length */
#define XLNX_XVC_VSEC_ID 0x8	/* expected VSEC id */
#define XLNX_XVC_MAX_BITS 0x20	/* max bits per hardware transaction */

/* SWD response decoding: ACK is bits [11:9] of the 12-bit header shift,
 * parity is bit 0 of the trailing 2-bit shift. */
#define MASK_ACK(x) (((x) >> 9) & 0x7)
#define MASK_PAR(x) ((int)((x) & 0x1))
/* Driver state: config-space file descriptor, offset of the XVC
 * capability inside config space, and the user-configured PCI
 * device address string. Fields are grounded by their use in the
 * accessors below (fd/offset) and the config command (device). */
struct xlnx_pcie_xvc {
	int fd;			/* fd of /sys/bus/pci/devices/<dev>/config */
	unsigned offset;	/* byte offset of the XVC capability */
	char *device;		/* PCI device address, strdup'd */
};

static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
52 static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
57 /* Note: This should be ok endianess-wise because by going
58 * through sysfs the kernel does the conversion in the config
59 * space accessor functions
61 err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
62 xlnx_pcie_xvc->offset + offset);
63 if (err != sizeof(res)) {
64 LOG_ERROR("Failed to read offset %x", offset);
65 return ERROR_JTAG_DEVICE_ERROR;
74 static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
78 /* Note: This should be ok endianess-wise because by going
79 * through sysfs the kernel does the conversion in the config
80 * space accessor functions
82 err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
83 xlnx_pcie_xvc->offset + offset);
84 if (err != sizeof(val)) {
85 LOG_ERROR("Failed to write offset: %x with value: %x",
87 return ERROR_JTAG_DEVICE_ERROR;
93 static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
98 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);
102 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);
106 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDx_REG, tdi);
110 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDx_REG, tdo);
115 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %x, tdi: %x, tdo: %x",
116 num_bits, tms, tdi, *tdo);
118 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %x, tdi: %x, tdo: <null>",
123 int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
125 int tms = tap_get_state() == TAP_RESET ? 1 : 0;
126 size_t left = cmd->cmd.stableclocks->num_cycles;
130 LOG_DEBUG("stableclocks %i cycles", cmd->cmd.runtest->num_cycles);
133 write = MIN(XLNX_XVC_MAX_BITS, left);
134 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
143 static int xlnx_pcie_xvc_execute_statemove(size_t skip)
145 uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
146 tap_get_end_state());
147 int tms_count = tap_get_tms_path_len(tap_get_state(),
148 tap_get_end_state());
151 LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
152 tap_state_name(tap_get_state()),
153 tap_state_name(tap_get_end_state()));
156 err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);
160 tap_set_state(tap_get_end_state());
165 static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
169 LOG_DEBUG("runtest %i cycles, end in %i",
170 cmd->cmd.runtest->num_cycles,
171 cmd->cmd.runtest->end_state);
173 tap_state_t tmp_state = tap_get_end_state();
175 if (tap_get_state() != TAP_IDLE) {
176 tap_set_end_state(TAP_IDLE);
177 err = xlnx_pcie_xvc_execute_statemove(0);
182 size_t left = cmd->cmd.runtest->num_cycles;
186 write = MIN(XLNX_XVC_MAX_BITS, left);
187 err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);
193 tap_set_end_state(tmp_state);
194 if (tap_get_state() != tap_get_end_state())
195 err = xlnx_pcie_xvc_execute_statemove(0);
200 static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
202 size_t num_states = cmd->cmd.pathmove->num_states;
203 tap_state_t *path = cmd->cmd.pathmove->path;
207 LOG_DEBUG("pathmove: %i states, end in %i",
208 cmd->cmd.pathmove->num_states,
209 cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);
211 for (i = 0; i < num_states; i++) {
212 if (path[i] == tap_state_transition(tap_get_state(), false)) {
213 err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
214 } else if (path[i] == tap_state_transition(tap_get_state(), true)) {
215 err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
217 LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
218 tap_state_name(tap_get_state()),
219 tap_state_name(path[i]));
220 err = ERROR_JTAG_QUEUE_FAILED;
224 tap_set_state(path[i]);
227 tap_set_end_state(tap_get_state());
232 static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
234 enum scan_type type = jtag_scan_type(cmd->cmd.scan);
235 tap_state_t saved_end_state = cmd->cmd.scan->end_state;
236 bool ir_scan = cmd->cmd.scan->ir_scan;
237 uint32_t tdi, tms, tdo;
238 uint8_t *buf, *rd_ptr;
243 scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);
245 LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
246 (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
247 tap_state_name(tap_get_state()),
248 tap_state_name(cmd->cmd.scan->end_state));
250 /* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
251 * vice-versa, do a statemove to corresponding other state, then restore
254 if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
255 tap_set_end_state(TAP_IRSHIFT);
256 err = xlnx_pcie_xvc_execute_statemove(0);
259 tap_set_end_state(saved_end_state);
260 } else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
261 tap_set_end_state(TAP_DRSHIFT);
262 err = xlnx_pcie_xvc_execute_statemove(0);
265 tap_set_end_state(saved_end_state);
270 write = MIN(XLNX_XVC_MAX_BITS, left);
271 /* the last TMS should be a 1, to leave the state */
272 tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
273 tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
274 err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?
279 if (type != SCAN_OUT)
280 buf_set_u32(rd_ptr, 0, write, tdo);
281 rd_ptr += sizeof(uint32_t);
284 err = jtag_read_buffer(buf, cmd->cmd.scan);
288 if (tap_get_state() != tap_get_end_state())
289 err = xlnx_pcie_xvc_execute_statemove(1);
299 static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
301 LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
302 cmd->cmd.reset->srst);
305 static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
307 LOG_DEBUG("sleep %" PRIi32 "", cmd->cmd.sleep->us);
308 usleep(cmd->cmd.sleep->us);
311 static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
313 const size_t num_bits = cmd->cmd.tms->num_bits;
314 const uint8_t *bits = cmd->cmd.tms->bits;
319 LOG_DEBUG("execute tms %zu", num_bits);
323 write = MIN(XLNX_XVC_MAX_BITS, left);
324 tms = buf_get_u32(bits, 0, write);
325 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
335 static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
337 LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
339 case JTAG_STABLECLOCKS:
340 return xlnx_pcie_xvc_execute_stableclocks(cmd);
342 return xlnx_pcie_xvc_execute_runtest(cmd);
344 tap_set_end_state(cmd->cmd.statemove->end_state);
345 return xlnx_pcie_xvc_execute_statemove(0);
347 return xlnx_pcie_xvc_execute_pathmove(cmd);
349 return xlnx_pcie_xvc_execute_scan(cmd);
351 xlnx_pcie_xvc_execute_reset(cmd);
354 xlnx_pcie_xvc_execute_sleep(cmd);
357 return xlnx_pcie_xvc_execute_tms(cmd);
359 LOG_ERROR("BUG: Unknown JTAG command type encountered.");
360 return ERROR_JTAG_QUEUE_FAILED;
366 static int xlnx_pcie_xvc_execute_queue(void)
368 struct jtag_command *cmd = jtag_command_queue;
372 ret = xlnx_pcie_xvc_execute_command(cmd);
384 static int xlnx_pcie_xvc_init(void)
386 char filename[PATH_MAX];
390 snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
391 xlnx_pcie_xvc->device);
392 xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
393 if (xlnx_pcie_xvc->fd < 0) {
394 LOG_ERROR("Failed to open device: %s", filename);
395 return ERROR_JTAG_INIT_FAILED;
398 LOG_INFO("Scanning PCIe device %s's for Xilinx XVC/PCIe ...",
399 xlnx_pcie_xvc->device);
400 /* Parse the PCIe extended capability list and try to find
401 * vendor specific header */
402 xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
403 while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
404 xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
405 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);
408 LOG_DEBUG("Checking capability at 0x%x; id=0x%04x version=0x%x next=0x%x",
409 xlnx_pcie_xvc->offset,
411 PCI_EXT_CAP_VER(cap),
412 PCI_EXT_CAP_NEXT(cap));
413 if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
414 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);
417 LOG_DEBUG("Checking possible match at 0x%x; id: 0x%x; rev: 0x%x; length: 0x%x",
418 xlnx_pcie_xvc->offset,
419 PCI_VNDR_HEADER_ID(vh),
420 PCI_VNDR_HEADER_REV(vh),
421 PCI_VNDR_HEADER_LEN(vh));
422 if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
423 (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))
426 xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);
428 if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
429 xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
430 close(xlnx_pcie_xvc->fd);
431 return ERROR_JTAG_INIT_FAILED;
434 LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);
439 static int xlnx_pcie_xvc_quit(void)
443 err = close(xlnx_pcie_xvc->fd);
450 COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
453 return ERROR_COMMAND_SYNTAX_ERROR;
455 /* we can't really free this in a safe manner, so at least
456 * limit the memory we're leaking by freeing the old one first
457 * before allocating a new one ...
459 if (xlnx_pcie_xvc->device)
460 free(xlnx_pcie_xvc->device);
462 xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
466 static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
468 .name = "xlnx_pcie_xvc_config",
469 .handler = xlnx_pcie_xvc_handle_config_command,
470 .mode = COMMAND_CONFIG,
471 .help = "Configure XVC/PCIe JTAG adapter",
474 COMMAND_REGISTRATION_DONE
477 static struct jtag_interface xlnx_pcie_xvc_jtag_ops = {
478 .execute_queue = &xlnx_pcie_xvc_execute_queue,
481 static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq, size_t length)
489 write = MIN(XLNX_XVC_MAX_BITS, left);
490 send = buf_get_u32(seq, 0, write);
491 err = xlnx_pcie_xvc_transact(write, send, 0, NULL);
495 seq += sizeof(uint32_t);
501 static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq)
505 LOG_DEBUG("SWD line reset");
506 return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset,
507 swd_seq_line_reset_len);
509 LOG_DEBUG("JTAG-to-SWD");
510 return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd,
511 swd_seq_jtag_to_swd_len);
513 LOG_DEBUG("SWD-to-JTAG");
514 return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag,
515 swd_seq_swd_to_jtag_len);
517 LOG_ERROR("Sequence %d not supported", seq);
524 static int queued_retval;
526 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
527 uint32_t ap_delay_clk);
529 static void swd_clear_sticky_errors(void)
531 xlnx_pcie_xvc_swd_write_reg(swd_cmd(false, false, DP_ABORT),
532 STKCMPCLR | STKERRCLR | WDERRCLR | ORUNERRCLR, 0);
535 static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd, uint32_t *value,
536 uint32_t ap_delay_clk)
538 uint32_t res, ack, rpar;
541 assert(cmd & SWD_CMD_RnW);
543 cmd |= SWD_CMD_START | SWD_CMD_PARK;
545 err = xlnx_pcie_xvc_transact(12, cmd, 0, &res);
552 err = xlnx_pcie_xvc_transact(32, 0, 0, &res);
557 err = xlnx_pcie_xvc_transact(2, 0, 0, &rpar);
561 LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
562 ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
563 "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
564 cmd & SWD_CMD_APnDP ? "AP" : "DP",
565 cmd & SWD_CMD_RnW ? "read" : "write",
566 (cmd & SWD_CMD_A32) >> 1,
570 if (MASK_PAR(rpar) != parity_u32(res)) {
571 LOG_DEBUG_IO("Wrong parity detected");
572 queued_retval = ERROR_FAIL;
577 if (cmd & SWD_CMD_APnDP)
578 err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
582 LOG_DEBUG_IO("SWD_ACK_WAIT");
583 swd_clear_sticky_errors();
586 LOG_DEBUG_IO("SWD_ACK_FAULT");
590 LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
598 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
599 uint32_t ap_delay_clk)
604 assert(!(cmd & SWD_CMD_RnW));
606 cmd |= SWD_CMD_START | SWD_CMD_PARK;
607 /* cmd + trn + ack */
608 err = xlnx_pcie_xvc_transact(13, cmd, 0, &res);
615 err = xlnx_pcie_xvc_transact(32, value, 0, NULL);
620 err = xlnx_pcie_xvc_transact(2, parity_u32(value), 0, NULL);
624 LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
625 ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
626 "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
627 cmd & SWD_CMD_APnDP ? "AP" : "DP",
628 cmd & SWD_CMD_RnW ? "read" : "write",
629 (cmd & SWD_CMD_A32) >> 1,
634 if (cmd & SWD_CMD_APnDP)
635 err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
639 LOG_DEBUG_IO("SWD_ACK_WAIT");
640 swd_clear_sticky_errors();
643 LOG_DEBUG_IO("SWD_ACK_FAULT");
647 LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
656 static int xlnx_pcie_xvc_swd_run_queue(void)
660 /* we want at least 8 idle cycles between each transaction */
661 err = xlnx_pcie_xvc_transact(8, 0, 0, NULL);
666 queued_retval = ERROR_OK;
667 LOG_DEBUG("SWD queue return value: %02x", err);
672 static int xlnx_pcie_xvc_swd_init(void)
677 static const struct swd_driver xlnx_pcie_xvc_swd_ops = {
678 .init = xlnx_pcie_xvc_swd_init,
679 .switch_seq = xlnx_pcie_xvc_swd_switch_seq,
680 .read_reg = xlnx_pcie_xvc_swd_read_reg,
681 .write_reg = xlnx_pcie_xvc_swd_write_reg,
682 .run = xlnx_pcie_xvc_swd_run_queue,
685 static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };
687 struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
688 .name = "xlnx_pcie_xvc",
689 .transports = xlnx_pcie_xvc_transports,
690 .commands = xlnx_pcie_xvc_command_handlers,
692 .init = &xlnx_pcie_xvc_init,
693 .quit = &xlnx_pcie_xvc_quit,
695 .jtag_ops = &xlnx_pcie_xvc_jtag_ops,
696 .swd_ops = &xlnx_pcie_xvc_swd_ops,