/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2019 Google, LLC.
 * Author: Moritz Fischer <moritzf@google.com>
 */
15 #include <linux/pci.h>
17 #include <jtag/interface.h>
19 #include <jtag/commands.h>
20 #include <helper/replacements.h>
21 #include <helper/bits.h>
/* First possible offset of a PCIe extended capability in config space */
#define PCIE_EXT_CAP_LST 0x100
/* Register offsets relative to the XVC vendor-specific capability */
#define XLNX_XVC_EXT_CAP 0x00	/* extended capability header */
#define XLNX_XVC_VSEC_HDR 0x04	/* vendor-specific header (id/rev/len) */
#define XLNX_XVC_LEN_REG 0x0C	/* number of bits to shift per transaction */
#define XLNX_XVC_TMS_REG 0x10	/* TMS bit vector, LSB first */
#define XLNX_XVC_TDx_REG 0x14	/* shared register: write TDI, read back TDO */
/* Identification of the Xilinx XVC vendor-specific capability */
#define XLNX_XVC_CAP_SIZE 0x20	/* expected VSEC length */
#define XLNX_XVC_VSEC_ID 0x8	/* expected VSEC id */
#define XLNX_XVC_MAX_BITS 0x20	/* max bits per transaction (32-bit registers) */
/* Driver state for the XVC-over-PCIe adapter.
 * Members reconstructed from their uses in this file (fd/offset in the
 * config-space accessors, device in init and the config command). */
struct xlnx_pcie_xvc {
	int fd;			/* fd of /sys/bus/pci/devices/<device>/config */
	unsigned offset;	/* config-space offset of the XVC VSEC capability */
	char *device;		/* PCI device name (BDF) set via xlnx_pcie_xvc_config */
};

static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
44 static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
49 /* Note: This should be ok endianess-wise because by going
50 * through sysfs the kernel does the conversion in the config
51 * space accessor functions
53 err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
54 xlnx_pcie_xvc->offset + offset);
55 if (err != sizeof(res)) {
56 LOG_ERROR("Failed to read offset %x", offset);
57 return ERROR_JTAG_DEVICE_ERROR;
66 static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
70 /* Note: This should be ok endianess-wise because by going
71 * through sysfs the kernel does the conversion in the config
72 * space accessor functions
74 err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
75 xlnx_pcie_xvc->offset + offset);
76 if (err != sizeof(val)) {
77 LOG_ERROR("Failed to write offset: %x with value: %x",
79 return ERROR_JTAG_DEVICE_ERROR;
85 static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
90 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);
94 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);
98 err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDx_REG, tdi);
102 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDx_REG, tdo);
107 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %x, tdi: %x, tdo: %x",
108 num_bits, tms, tdi, *tdo);
110 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %x, tdi: %x, tdo: <null>",
115 int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
117 int tms = tap_get_state() == TAP_RESET ? 1 : 0;
118 size_t left = cmd->cmd.stableclocks->num_cycles;
122 LOG_DEBUG("stableclocks %i cycles", cmd->cmd.runtest->num_cycles);
125 write = MIN(XLNX_XVC_MAX_BITS, left);
126 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
135 static int xlnx_pcie_xvc_execute_statemove(size_t skip)
137 uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
138 tap_get_end_state());
139 int tms_count = tap_get_tms_path_len(tap_get_state(),
140 tap_get_end_state());
143 LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
144 tap_state_name(tap_get_state()),
145 tap_state_name(tap_get_end_state()));
148 err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);
152 tap_set_state(tap_get_end_state());
157 static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
161 LOG_DEBUG("runtest %i cycles, end in %i",
162 cmd->cmd.runtest->num_cycles,
163 cmd->cmd.runtest->end_state);
165 tap_state_t tmp_state = tap_get_end_state();
167 if (tap_get_state() != TAP_IDLE) {
168 tap_set_end_state(TAP_IDLE);
169 err = xlnx_pcie_xvc_execute_statemove(0);
174 size_t left = cmd->cmd.runtest->num_cycles;
178 write = MIN(XLNX_XVC_MAX_BITS, left);
179 err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);
185 tap_set_end_state(tmp_state);
186 if (tap_get_state() != tap_get_end_state())
187 err = xlnx_pcie_xvc_execute_statemove(0);
192 static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
194 size_t num_states = cmd->cmd.pathmove->num_states;
195 tap_state_t *path = cmd->cmd.pathmove->path;
199 LOG_DEBUG("pathmove: %i states, end in %i",
200 cmd->cmd.pathmove->num_states,
201 cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);
203 for (i = 0; i < num_states; i++) {
204 if (path[i] == tap_state_transition(tap_get_state(), false)) {
205 err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
206 } else if (path[i] == tap_state_transition(tap_get_state(), true)) {
207 err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
209 LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
210 tap_state_name(tap_get_state()),
211 tap_state_name(path[i]));
212 err = ERROR_JTAG_QUEUE_FAILED;
216 tap_set_state(path[i]);
219 tap_set_end_state(tap_get_state());
224 static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
226 enum scan_type type = jtag_scan_type(cmd->cmd.scan);
227 tap_state_t saved_end_state = cmd->cmd.scan->end_state;
228 bool ir_scan = cmd->cmd.scan->ir_scan;
229 uint32_t tdi, tms, tdo;
230 uint8_t *buf, *rd_ptr;
235 scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);
237 LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
238 (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
239 tap_state_name(tap_get_state()),
240 tap_state_name(cmd->cmd.scan->end_state));
242 /* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
243 * vice-versa, do a statemove to corresponding other state, then restore
246 if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
247 tap_set_end_state(TAP_IRSHIFT);
248 err = xlnx_pcie_xvc_execute_statemove(0);
251 tap_set_end_state(saved_end_state);
252 } else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
253 tap_set_end_state(TAP_DRSHIFT);
254 err = xlnx_pcie_xvc_execute_statemove(0);
257 tap_set_end_state(saved_end_state);
262 write = MIN(XLNX_XVC_MAX_BITS, left);
263 /* the last TMS should be a 1, to leave the state */
264 tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
265 tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
266 err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?
271 if (type != SCAN_OUT)
272 buf_set_u32(rd_ptr, 0, write, tdo);
273 rd_ptr += sizeof(uint32_t);
276 err = jtag_read_buffer(buf, cmd->cmd.scan);
280 if (tap_get_state() != tap_get_end_state())
281 err = xlnx_pcie_xvc_execute_statemove(1);
291 static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
293 LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
294 cmd->cmd.reset->srst);
297 static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
299 LOG_DEBUG("sleep %" PRIi32 "", cmd->cmd.sleep->us);
300 usleep(cmd->cmd.sleep->us);
303 static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
305 const size_t num_bits = cmd->cmd.tms->num_bits;
306 const uint8_t *bits = cmd->cmd.tms->bits;
311 LOG_DEBUG("execute tms %zu", num_bits);
315 write = MIN(XLNX_XVC_MAX_BITS, left);
316 tms = buf_get_u32(bits, 0, write);
317 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
327 static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
329 LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
331 case JTAG_STABLECLOCKS:
332 return xlnx_pcie_xvc_execute_stableclocks(cmd);
334 return xlnx_pcie_xvc_execute_runtest(cmd);
336 tap_set_end_state(cmd->cmd.statemove->end_state);
337 return xlnx_pcie_xvc_execute_statemove(0);
339 return xlnx_pcie_xvc_execute_pathmove(cmd);
341 return xlnx_pcie_xvc_execute_scan(cmd);
343 xlnx_pcie_xvc_execute_reset(cmd);
346 xlnx_pcie_xvc_execute_sleep(cmd);
349 return xlnx_pcie_xvc_execute_tms(cmd);
351 LOG_ERROR("BUG: Unknown JTAG command type encountered.");
352 return ERROR_JTAG_QUEUE_FAILED;
358 static int xlnx_pcie_xvc_execute_queue(void)
360 struct jtag_command *cmd = jtag_command_queue;
364 ret = xlnx_pcie_xvc_execute_command(cmd);
376 static int xlnx_pcie_xvc_init(void)
378 char filename[PATH_MAX];
382 snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
383 xlnx_pcie_xvc->device);
384 xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
385 if (xlnx_pcie_xvc->fd < 0) {
386 LOG_ERROR("Failed to open device: %s", filename);
387 return ERROR_JTAG_INIT_FAILED;
390 LOG_INFO("Scanning PCIe device %s's for Xilinx XVC/PCIe ...",
391 xlnx_pcie_xvc->device);
392 /* Parse the PCIe extended capability list and try to find
393 * vendor specific header */
394 xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
395 while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
396 xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
397 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);
400 LOG_DEBUG("Checking capability at 0x%x; id=0x%04x version=0x%x next=0x%x",
401 xlnx_pcie_xvc->offset,
403 PCI_EXT_CAP_VER(cap),
404 PCI_EXT_CAP_NEXT(cap));
405 if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
406 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);
409 LOG_DEBUG("Checking possible match at 0x%x; id: 0x%x; rev: 0x%x; length: 0x%x",
410 xlnx_pcie_xvc->offset,
411 PCI_VNDR_HEADER_ID(vh),
412 PCI_VNDR_HEADER_REV(vh),
413 PCI_VNDR_HEADER_LEN(vh));
414 if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
415 (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))
418 xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);
420 if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
421 xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
422 close(xlnx_pcie_xvc->fd);
423 return ERROR_JTAG_INIT_FAILED;
426 LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);
431 static int xlnx_pcie_xvc_quit(void)
435 err = close(xlnx_pcie_xvc->fd);
442 COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
445 return ERROR_COMMAND_SYNTAX_ERROR;
447 /* we can't really free this in a safe manner, so at least
448 * limit the memory we're leaking by freeing the old one first
449 * before allocating a new one ...
451 if (xlnx_pcie_xvc->device)
452 free(xlnx_pcie_xvc->device);
454 xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
458 static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
460 .name = "xlnx_pcie_xvc_config",
461 .handler = xlnx_pcie_xvc_handle_config_command,
462 .mode = COMMAND_CONFIG,
463 .help = "Configure XVC/PCIe JTAG adapter",
466 COMMAND_REGISTRATION_DONE
469 static struct jtag_interface xlnx_pcie_xvc_interface = {
470 .execute_queue = &xlnx_pcie_xvc_execute_queue,
473 struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
474 .name = "xlnx_pcie_xvc",
475 .transports = jtag_only,
476 .commands = xlnx_pcie_xvc_command_handlers,
478 .init = &xlnx_pcie_xvc_init,
479 .quit = &xlnx_pcie_xvc_quit,
481 .jtag_ops = &xlnx_pcie_xvc_interface,