/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2019 Google, LLC.
 * Author: Moritz Fischer <moritzf@google.com>
 */
15 #include <linux/pci.h>
17 #include <jtag/interface.h>
19 #include <jtag/commands.h>
20 #include <helper/replacements.h>
21 #include <helper/bits.h>
/* Available only from kernel v4.10 */
#ifndef PCI_CFG_SPACE_EXP_SIZE
#define PCI_CFG_SPACE_EXP_SIZE 4096
/* NOTE(review): the matching #endif for the #ifndef above is not visible
 * in this chunk — confirm it exists in the full file. */

/* Offset of the first PCIe extended capability in config space. */
#define PCIE_EXT_CAP_LST 0x100

/* Register offsets within the Xilinx XVC vendor-specific capability. */
#define XLNX_XVC_EXT_CAP 0x00	/* extended capability header */
#define XLNX_XVC_VSEC_HDR 0x04	/* vendor-specific header */
#define XLNX_XVC_LEN_REG 0x0C	/* shift length (number of bits) */
#define XLNX_XVC_TMS_REG 0x10	/* TMS bit vector */
#define XLNX_XVC_TDX_REG 0x14	/* TDI on write, TDO on read */

#define XLNX_XVC_CAP_SIZE 0x20	/* expected VSEC length (see init scan) */
#define XLNX_XVC_VSEC_ID 0x8	/* expected vendor-specific ID */
#define XLNX_XVC_MAX_BITS 0x20	/* max bits per hardware transaction (32) */

/* Extract the 3-bit SWD ACK field (bits [11:9] of the shifted response). */
#define MASK_ACK(x) (((x) >> 9) & 0x7)
/* Extract the SWD parity bit (bit 0 of the shifted response). */
#define MASK_PAR(x) ((int)((x) & 0x1))
/* Driver state.  From usage elsewhere in this file: 'fd' is the sysfs
 * config-space file descriptor, 'offset' the discovered capability offset,
 * and 'device' the PCI BDF string from the config command.
 * NOTE(review): the struct member declarations are not visible in this chunk. */
struct xlnx_pcie_xvc {
/* Singleton instance — the driver supports a single adapter. */
static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
/* Read a 32-bit XVC register at 'offset' (relative to the discovered
 * capability) via pread() on the sysfs config-space fd; the value is
 * delivered through 'val'.  Returns ERROR_JTAG_DEVICE_ERROR on a short or
 * failed read.
 * NOTE(review): local declarations and the function tail (success path,
 * closing brace) are not visible in this chunk. */
static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
	/* Note: This should be ok endianness-wise because by going
	 * through sysfs the kernel does the conversion in the config
	 * space accessor functions
	 */
	err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
		    xlnx_pcie_xvc->offset + offset);
	if (err != sizeof(res)) {
		LOG_ERROR("Failed to read offset %x", offset);
		return ERROR_JTAG_DEVICE_ERROR;
/* Write a 32-bit XVC register at 'offset' via pwrite() on the sysfs
 * config-space fd.  Returns ERROR_JTAG_DEVICE_ERROR on a short or failed
 * write.
 * NOTE(review): local declarations, the LOG_ERROR value argument and the
 * function tail are not visible in this chunk. */
static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
	/* Note: This should be ok endianness-wise because by going
	 * through sysfs the kernel does the conversion in the config
	 * space accessor functions
	 */
	err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
		     xlnx_pcie_xvc->offset + offset);
	if (err != sizeof(val)) {
		LOG_ERROR("Failed to write offset: %x with value: %" PRIx32,
		return ERROR_JTAG_DEVICE_ERROR;
/* Perform one hardware transaction of up to XLNX_XVC_MAX_BITS bits:
 * program the bit count, the TMS and TDI vectors, then read back TDO
 * from the same TDX register.  'tdo' may be NULL when captured data is
 * not needed — the trace below has a separate branch for that case.
 * NOTE(review): the continuation of the parameter list ('tdo'), the
 * error checks between register accesses, and the function tail are not
 * visible in this chunk. */
static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
	err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);

	err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);

	err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDX_REG, tdi);

	err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDX_REG, tdo);

	LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: %" PRIx32,
		     num_bits, tms, tdi, *tdo);

	LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: <null>",
123 static int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
125 int tms = tap_get_state() == TAP_RESET ? 1 : 0;
126 size_t left = cmd->cmd.stableclocks->num_cycles;
130 LOG_DEBUG("stableclocks %i cycles", cmd->cmd.runtest->num_cycles);
133 write = MIN(XLNX_XVC_MAX_BITS, left);
134 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
/* Move the TAP from its current state to tap_get_end_state() using the
 * precomputed TMS path.  'skip' drops the first 'skip' TMS bits — used
 * after a scan, where the exit transition already happened on the last
 * shifted bit (see the statemove(1) call in execute_scan).
 * NOTE(review): local declarations and error handling are not visible in
 * this chunk. */
static int xlnx_pcie_xvc_execute_statemove(size_t skip)
	uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
					    tap_get_end_state());
	int tms_count = tap_get_tms_path_len(tap_get_state(),
					     tap_get_end_state());

	LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
		  tap_state_name(tap_get_state()),
		  tap_state_name(tap_get_end_state()));

	/* Shift the remaining TMS bits in one transaction; TDI held low. */
	err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);

	tap_set_state(tap_get_end_state());
/* Execute a RUNTEST command: move the TAP to IDLE if needed, clock
 * num_cycles with TMS low in XLNX_XVC_MAX_BITS chunks, then restore and
 * move to the caller's requested end state.
 * NOTE(review): local declarations, the chunking loop header, error
 * checks and closing braces are not visible in this chunk. */
static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
	LOG_DEBUG("runtest %i cycles, end in %i",
		  cmd->cmd.runtest->num_cycles,
		  cmd->cmd.runtest->end_state);

	/* Remember the requested end state; IDLE is needed while clocking. */
	tap_state_t tmp_state = tap_get_end_state();

	if (tap_get_state() != TAP_IDLE) {
		tap_set_end_state(TAP_IDLE);
		err = xlnx_pcie_xvc_execute_statemove(0);

	size_t left = cmd->cmd.runtest->num_cycles;

		write = MIN(XLNX_XVC_MAX_BITS, left);
		err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);

	/* Restore the caller's end state and move there if not already in it. */
	tap_set_end_state(tmp_state);
	if (tap_get_state() != tap_get_end_state())
		err = xlnx_pcie_xvc_execute_statemove(0);
/* Execute a PATHMOVE command: step the TAP through an explicit list of
 * adjacent states, one single-bit transaction per transition, rejecting
 * any step that is not a legal TAP transition.
 * NOTE(review): local declarations, the "else" branch opener before the
 * BUG log, error checks and closing braces are not visible in this
 * chunk. */
static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
	size_t num_states = cmd->cmd.pathmove->num_states;
	tap_state_t *path = cmd->cmd.pathmove->path;

	LOG_DEBUG("pathmove: %i states, end in %i",
		  cmd->cmd.pathmove->num_states,
		  cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);

	for (i = 0; i < num_states; i++) {
		if (path[i] == tap_state_transition(tap_get_state(), false)) {
			/* TMS=0 transition per tap_state_transition(..., false) */
			err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
		} else if (path[i] == tap_state_transition(tap_get_state(), true)) {
			/* TMS=1 transition per tap_state_transition(..., true) */
			err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
			LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
				  tap_state_name(tap_get_state()),
				  tap_state_name(path[i]));
			err = ERROR_JTAG_QUEUE_FAILED;

		tap_set_state(path[i]);

	/* After a pathmove the end state is wherever we landed. */
	tap_set_end_state(tap_get_state());
/* Execute an IR/DR SCAN: move to the proper shift state, shift the scan
 * buffer in 32-bit chunks (raising TMS on the very last bit to leave the
 * shift state), capture TDO for non-SCAN_OUT scans, hand the buffer back
 * to the JTAG layer, then finish the move to the end state.
 * NOTE(review): local declarations, the chunking loop header, error
 * checks, the transact() continuation line and closing braces are not
 * visible in this chunk. */
static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
	enum scan_type type = jtag_scan_type(cmd->cmd.scan);
	tap_state_t saved_end_state = cmd->cmd.scan->end_state;
	bool ir_scan = cmd->cmd.scan->ir_scan;
	uint32_t tdi, tms, tdo;
	uint8_t *buf, *rd_ptr;

	scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);

	LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
		  (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
		  tap_state_name(tap_get_state()),
		  tap_state_name(cmd->cmd.scan->end_state));

	/* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
	 * vice-versa, do a statemove to corresponding other state, then restore
	 * end state
	 */
	if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
		tap_set_end_state(TAP_IRSHIFT);
		err = xlnx_pcie_xvc_execute_statemove(0);

		tap_set_end_state(saved_end_state);
	} else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
		tap_set_end_state(TAP_DRSHIFT);
		err = xlnx_pcie_xvc_execute_statemove(0);

		tap_set_end_state(saved_end_state);

		write = MIN(XLNX_XVC_MAX_BITS, left);
		/* the last TMS should be a 1, to leave the state */
		tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
		tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
		err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?

		if (type != SCAN_OUT)
			buf_set_u32(rd_ptr, 0, write, tdo);
		rd_ptr += sizeof(uint32_t);

	err = jtag_read_buffer(buf, cmd->cmd.scan);

	/* skip=1: the exit transition already happened on the last TMS bit. */
	if (tap_get_state() != tap_get_end_state())
		err = xlnx_pcie_xvc_execute_statemove(1);
/* RESET command handler — only a log statement is visible here.
 * NOTE(review): no TRST/SRST action is visible in this chunk; confirm
 * the handler is intentionally log-only in the full file. */
static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
	LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
		  cmd->cmd.reset->srst);

/* SLEEP command handler: sleep for the requested number of microseconds. */
static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
	LOG_DEBUG("sleep %" PRIu32 "", cmd->cmd.sleep->us);
	usleep(cmd->cmd.sleep->us);
/* Execute a raw TMS bit sequence (e.g. SWJ sequences) in
 * XLNX_XVC_MAX_BITS chunks, TDI held low.
 * NOTE(review): local declarations, the chunking loop header and error
 * handling are not visible in this chunk. */
static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
	const size_t num_bits = cmd->cmd.tms->num_bits;
	const uint8_t *bits = cmd->cmd.tms->bits;

	LOG_DEBUG("execute tms %zu", num_bits);

		write = MIN(XLNX_XVC_MAX_BITS, left);
		tms = buf_get_u32(bits, 0, write);
		err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
/* Dispatch one queued JTAG command to its per-type handler.
 * NOTE(review): the switch header and several case labels (runtest,
 * statemove, pathmove, scan, reset, sleep, tms, default) are not visible
 * in this chunk — only the handler calls remain. */
static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
	LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
	case JTAG_STABLECLOCKS:
		return xlnx_pcie_xvc_execute_stableclocks(cmd);
		return xlnx_pcie_xvc_execute_runtest(cmd);
		tap_set_end_state(cmd->cmd.statemove->end_state);
		return xlnx_pcie_xvc_execute_statemove(0);
		return xlnx_pcie_xvc_execute_pathmove(cmd);
		return xlnx_pcie_xvc_execute_scan(cmd);
		xlnx_pcie_xvc_execute_reset(cmd);
		xlnx_pcie_xvc_execute_sleep(cmd);
		return xlnx_pcie_xvc_execute_tms(cmd);
		LOG_ERROR("BUG: Unknown JTAG command type encountered.");
		return ERROR_JTAG_QUEUE_FAILED;
/* Run every command on the global JTAG command queue through
 * xlnx_pcie_xvc_execute_command().
 * NOTE(review): the iteration and return-value handling are largely not
 * visible in this chunk. */
static int xlnx_pcie_xvc_execute_queue(void)
	struct jtag_command *cmd = jtag_command_queue;

		ret = xlnx_pcie_xvc_execute_command(cmd);
/* Adapter init: open the configured device's sysfs config space and walk
 * the PCIe extended capability list looking for the Xilinx XVC
 * vendor-specific capability (VSEC id XLNX_XVC_VSEC_ID, length
 * XLNX_XVC_CAP_SIZE).  On success xlnx_pcie_xvc->offset is left pointing
 * at the capability; on failure the fd is closed and
 * ERROR_JTAG_INIT_FAILED is returned.
 * NOTE(review): local declarations (cap, vh, err), several error checks,
 * the LOG_DEBUG id argument and some braces are not visible in this
 * chunk. */
static int xlnx_pcie_xvc_init(void)
	char filename[PATH_MAX];

	snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
		 xlnx_pcie_xvc->device);
	xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
	if (xlnx_pcie_xvc->fd < 0) {
		LOG_ERROR("Failed to open device: %s", filename);
		return ERROR_JTAG_INIT_FAILED;

	LOG_INFO("Scanning PCIe device %s's for Xilinx XVC/PCIe ...",
		 xlnx_pcie_xvc->device);
	/* Parse the PCIe extended capability list and try to find
	 * vendor specific header */
	xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
	/* Bounds keep the walk inside config space and detect a next-ptr of 0. */
	while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
	       xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
		err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);

		LOG_DEBUG("Checking capability at 0x%x; id=0x%04" PRIx32 " version=0x%" PRIx32 " next=0x%" PRIx32,
			  xlnx_pcie_xvc->offset,
			  PCI_EXT_CAP_VER(cap),
			  PCI_EXT_CAP_NEXT(cap));
		if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
			err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);

			LOG_DEBUG("Checking possible match at 0x%x; id: 0x%" PRIx32 "; rev: 0x%" PRIx32 "; length: 0x%" PRIx32,
				  xlnx_pcie_xvc->offset,
				  PCI_VNDR_HEADER_ID(vh),
				  PCI_VNDR_HEADER_REV(vh),
				  PCI_VNDR_HEADER_LEN(vh));
			if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
			    (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))

		/* Not a match: follow the "next capability" pointer. */
		xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);

	/* Walked off the list without finding a usable capability. */
	if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
	    xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
		close(xlnx_pcie_xvc->fd);
		return ERROR_JTAG_INIT_FAILED;

	LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);
/* Adapter teardown: close the config-space file descriptor.
 * NOTE(review): error handling around close() is not visible in this
 * chunk. */
static int xlnx_pcie_xvc_quit(void)
	err = close(xlnx_pcie_xvc->fd);

/* "xlnx_pcie_xvc config <bdf>": record which PCI device (sysfs name) the
 * init hook should attach to.
 * NOTE(review): the argc check preceding the syntax-error return and
 * whether the strdup() result is checked are not visible in this chunk. */
COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* we can't really free this in a safe manner, so at least
	 * limit the memory we're leaking by freeing the old one first
	 * before allocating a new one ...
	 */
	free(xlnx_pcie_xvc->device);

	xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
/* Sub-commands under "xlnx_pcie_xvc" — the "config" handler above.
 * NOTE(review): the entry's .name line is not visible in this chunk. */
static const struct command_registration xlnx_pcie_xvc_subcommand_handlers[] = {
		.handler = xlnx_pcie_xvc_handle_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Configure XVC/PCIe JTAG adapter",
	COMMAND_REGISTRATION_DONE

/* Top-level command group registered with the command engine. */
static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
		.name = "xlnx_pcie_xvc",
		.help = "perform xlnx_pcie_xvc management",
		.chain = xlnx_pcie_xvc_subcommand_handlers,
	COMMAND_REGISTRATION_DONE

/* JTAG transport ops: only queue execution is provided by this driver. */
static struct jtag_interface xlnx_pcie_xvc_jtag_ops = {
	.execute_queue = &xlnx_pcie_xvc_execute_queue,
/* Shift an arbitrary SWD bit sequence (as a packed byte array of
 * 'length' bits) out through the TMS register, 32 bits at a time.
 * NOTE(review): local declarations, the chunking loop header and error
 * handling are not visible in this chunk. */
static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq, size_t length)
		write = MIN(XLNX_XVC_MAX_BITS, left);
		send = buf_get_u32(seq, 0, write);
		err = xlnx_pcie_xvc_transact(write, send, 0, NULL);

		seq += sizeof(uint32_t);
/* Emit one of the standard SWD switch sequences: line reset,
 * JTAG-to-SWD, or SWD-to-JTAG; unknown sequence ids are rejected.
 * NOTE(review): the switch header, case labels and the error return are
 * not visible in this chunk. */
static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq)
		LOG_DEBUG("SWD line reset");
		return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset,
						  swd_seq_line_reset_len);
		LOG_DEBUG("JTAG-to-SWD");
		return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd,
						  swd_seq_jtag_to_swd_len);
		LOG_DEBUG("SWD-to-JTAG");
		return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag,
						  swd_seq_swd_to_jtag_len);
		LOG_ERROR("Sequence %d not supported", seq);
/* Deferred status of queued SWD transfers, reported and reset by
 * xlnx_pcie_xvc_swd_run_queue(). */
static int queued_retval;

/* Forward declaration — swd_clear_sticky_errors() below needs it. */
static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
					uint32_t ap_delay_clk);

/* Clear all sticky error/overrun flags with a DP ABORT register write. */
static void swd_clear_sticky_errors(void)
	xlnx_pcie_xvc_swd_write_reg(swd_cmd(false, false, DP_ABORT),
				    STKCMPCLR | STKERRCLR | WDERRCLR | ORUNERRCLR, 0);
/* Queue an SWD register read: shift the request plus turnaround and ACK
 * (12 bits — ACK lands in bits [11:9], see MASK_ACK), then 32 data bits,
 * then parity + turnaround (2 bits).  Failures are latched into
 * queued_retval rather than returned; the result reaches the caller via
 * 'value' (presumably on the ACK-OK path — not fully visible here).
 * NOTE(review): local declarations, the ACK extraction, the switch over
 * the ACK value and the error-exit paths are only partially visible in
 * this chunk. */
static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd, uint32_t *value,
				       uint32_t ap_delay_clk)
	uint32_t res, ack, rpar;

	assert(cmd & SWD_CMD_RNW);

	cmd |= SWD_CMD_START | SWD_CMD_PARK;
	/* cmd + trn + ack (12 bits) */
	err = xlnx_pcie_xvc_transact(12, cmd, 0, &res);

	/* 32 data bits */
	err = xlnx_pcie_xvc_transact(32, 0, 0, &res);

	/* parity + turnaround */
	err = xlnx_pcie_xvc_transact(2, 0, 0, &rpar);

	LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
		  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
		  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
		  cmd & SWD_CMD_APNDP ? "AP" : "DP",
		  cmd & SWD_CMD_RNW ? "read" : "write",
		  (cmd & SWD_CMD_A32) >> 1,

	/* Data parity must match the parity bit shifted after it. */
	if (MASK_PAR(rpar) != parity_u32(res)) {
		LOG_DEBUG_IO("Wrong parity detected");
		queued_retval = ERROR_FAIL;

	/* AP accesses: shift ap_delay_clk extra idle clocks. */
	if (cmd & SWD_CMD_APNDP)
		err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);

		LOG_DEBUG_IO("SWD_ACK_WAIT");
		swd_clear_sticky_errors();

		LOG_DEBUG_IO("SWD_ACK_FAULT");

		LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
/* Queue an SWD register write: shift the request plus both turnarounds
 * and ACK (13 bits), then 32 data bits, then the computed parity bit.
 * Failures are latched into queued_retval rather than returned.
 * NOTE(review): local declarations, the ACK extraction, the switch over
 * the ACK value and the error-exit paths are only partially visible in
 * this chunk. */
static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
					uint32_t ap_delay_clk)
	assert(!(cmd & SWD_CMD_RNW));

	cmd |= SWD_CMD_START | SWD_CMD_PARK;
	/* cmd + trn + ack */
	err = xlnx_pcie_xvc_transact(13, cmd, 0, &res);

	/* 32 data bits */
	err = xlnx_pcie_xvc_transact(32, value, 0, NULL);

	/* parity bit of the written value */
	err = xlnx_pcie_xvc_transact(2, parity_u32(value), 0, NULL);

	LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
		  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
		  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
		  cmd & SWD_CMD_APNDP ? "AP" : "DP",
		  cmd & SWD_CMD_RNW ? "read" : "write",
		  (cmd & SWD_CMD_A32) >> 1,

	/* AP accesses: shift ap_delay_clk extra idle clocks. */
	if (cmd & SWD_CMD_APNDP)
		err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);

		LOG_DEBUG_IO("SWD_ACK_WAIT");
		swd_clear_sticky_errors();

		LOG_DEBUG_IO("SWD_ACK_FAULT");

		LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
/* Flush the SWD "queue": pad with idle cycles, then report and reset the
 * accumulated queued_retval.
 * NOTE(review): the assignment of queued_retval into err before the
 * reset, and the return path, are not visible in this chunk. */
static int xlnx_pcie_xvc_swd_run_queue(void)
	/* we want at least 8 idle cycles between each transaction */
	err = xlnx_pcie_xvc_transact(8, 0, 0, NULL);

	queued_retval = ERROR_OK;
	LOG_DEBUG("SWD queue return value: %02x", err);

/* SWD transport init hook.  NOTE(review): body not visible in this chunk. */
static int xlnx_pcie_xvc_swd_init(void)
/* SWD driver vtable handed to the transport layer. */
static const struct swd_driver xlnx_pcie_xvc_swd_ops = {
	.init = xlnx_pcie_xvc_swd_init,
	.switch_seq = xlnx_pcie_xvc_swd_switch_seq,
	.read_reg = xlnx_pcie_xvc_swd_read_reg,
	.write_reg = xlnx_pcie_xvc_swd_write_reg,
	.run = xlnx_pcie_xvc_swd_run_queue,

/* Transports this adapter supports; NULL-terminated list. */
static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };
/* Adapter registration: name, Tcl commands, lifecycle hooks, and the
 * JTAG/SWD operation tables defined above. */
struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
	.name = "xlnx_pcie_xvc",
	.transports = xlnx_pcie_xvc_transports,
	.commands = xlnx_pcie_xvc_command_handlers,

	.init = &xlnx_pcie_xvc_init,
	.quit = &xlnx_pcie_xvc_quit,

	.jtag_ops = &xlnx_pcie_xvc_jtag_ops,
	.swd_ops = &xlnx_pcie_xvc_swd_ops,