src/jtag/drivers/xlnx-pcie-xvc.c
/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * Copyright (c) 2019 Google, LLC.
 * Author: Moritz Fischer <moritzf@google.com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <linux/pci.h>

#include <jtag/interface.h>
#include <jtag/swd.h>
#include <jtag/commands.h>
#include <helper/replacements.h>
#include <helper/bits.h>

/* Available only from kernel v4.10 */
#ifndef PCI_CFG_SPACE_EXP_SIZE
#define PCI_CFG_SPACE_EXP_SIZE  4096
#endif

#define PCIE_EXT_CAP_LST        0x100

#define XLNX_XVC_EXT_CAP        0x00
#define XLNX_XVC_VSEC_HDR       0x04
#define XLNX_XVC_LEN_REG        0x0C
#define XLNX_XVC_TMS_REG        0x10
#define XLNX_XVC_TDX_REG        0x14

#define XLNX_XVC_CAP_SIZE       0x20
#define XLNX_XVC_VSEC_ID        0x8
#define XLNX_XVC_MAX_BITS       0x20

#define MASK_ACK(x) (((x) >> 9) & 0x7)
#define MASK_PAR(x) ((int)((x) & 0x1))

struct xlnx_pcie_xvc {
        int fd;
        unsigned offset;
        char *device;
};

static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;

static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
{
        uint32_t res;
        int err;

        /* Note: This should be ok endianness-wise because by going
         * through sysfs the kernel does the conversion in the config
         * space accessor functions
         */
        err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
                    xlnx_pcie_xvc->offset + offset);
        if (err != sizeof(res)) {
                LOG_ERROR("Failed to read offset %x", offset);
                return ERROR_JTAG_DEVICE_ERROR;
        }

        if (val)
                *val = res;

        return ERROR_OK;
}

static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
{
        int err;

        /* Note: This should be ok endianness-wise because by going
         * through sysfs the kernel does the conversion in the config
         * space accessor functions
         */
        err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
                     xlnx_pcie_xvc->offset + offset);
        if (err != sizeof(val)) {
                LOG_ERROR("Failed to write offset: %x with value: %" PRIx32,
                          offset, val);
                return ERROR_JTAG_DEVICE_ERROR;
        }

        return ERROR_OK;
}

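/*
 * The Xilinx XVC/PCIe VSEC exposes a simple shift engine: the host programs
 * the number of bits to shift (at most XLNX_XVC_MAX_BITS) into LEN_REG,
 * writes the TMS and TDI bit vectors, and reads the captured TDO bits back
 * from the shared TDX_REG.
 */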
static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
                                  uint32_t *tdo)
{
        int err;

        err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);
        if (err != ERROR_OK)
                return err;

        err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);
        if (err != ERROR_OK)
                return err;

        err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDX_REG, tdi);
        if (err != ERROR_OK)
                return err;

        err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDX_REG, tdo);
        if (err != ERROR_OK)
                return err;

        if (tdo)
                LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: %" PRIx32,
                             num_bits, tms, tdi, *tdo);
        else
                LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: <null>",
                             num_bits, tms, tdi);
        return ERROR_OK;
}

static int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
{
        int tms = tap_get_state() == TAP_RESET ? 1 : 0;
        size_t left = cmd->cmd.stableclocks->num_cycles;
        size_t write;
        int err;

        LOG_DEBUG("stableclocks %i cycles", cmd->cmd.stableclocks->num_cycles);

        while (left) {
                write = MIN(XLNX_XVC_MAX_BITS, left);
                err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
                if (err != ERROR_OK)
                        return err;
                left -= write;
        }

        return ERROR_OK;
}

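/*
 * Move the TAP from the current state to the programmed end state by shifting
 * the precomputed TMS path.  'skip' drops that many leading bits of the path;
 * the scan code uses this when the final data bit (clocked with TMS = 1) has
 * already taken the first step out of the shift state.
 */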
static int xlnx_pcie_xvc_execute_statemove(size_t skip)
{
        uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
                                            tap_get_end_state());
        int tms_count = tap_get_tms_path_len(tap_get_state(),
                                             tap_get_end_state());
        int err;

        LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
                  tap_state_name(tap_get_state()),
                  tap_state_name(tap_get_end_state()));

        err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);
        if (err != ERROR_OK)
                return err;

        tap_set_state(tap_get_end_state());

        return ERROR_OK;
}

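/*
 * Run-test/idle: make sure the TAP is in RUN/IDLE first, clock the requested
 * number of idle cycles in XLNX_XVC_MAX_BITS sized chunks, then move to the
 * command's end state.
 */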
static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
{
        int err = ERROR_OK;

        LOG_DEBUG("runtest %i cycles, end in %i",
                  cmd->cmd.runtest->num_cycles,
                  cmd->cmd.runtest->end_state);

        tap_state_t tmp_state = tap_get_end_state();

        if (tap_get_state() != TAP_IDLE) {
                tap_set_end_state(TAP_IDLE);
                err = xlnx_pcie_xvc_execute_statemove(0);
                if (err != ERROR_OK)
                        return err;
        }

        size_t left = cmd->cmd.runtest->num_cycles;
        size_t write;

        while (left) {
                write = MIN(XLNX_XVC_MAX_BITS, left);
                err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);
                if (err != ERROR_OK)
                        return err;
                left -= write;
        }

        tap_set_end_state(tmp_state);
        if (tap_get_state() != tap_get_end_state())
                err = xlnx_pcie_xvc_execute_statemove(0);

        return err;
}

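/*
 * Walk an explicit list of adjacent TAP states, one TCK per state: TMS is
 * chosen per step so the TAP follows exactly the requested path.
 */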
static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
{
        size_t num_states = cmd->cmd.pathmove->num_states;
        tap_state_t *path = cmd->cmd.pathmove->path;
        int err = ERROR_OK;
        size_t i;

        LOG_DEBUG("pathmove: %i states, end in %i",
                  cmd->cmd.pathmove->num_states,
                  cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);

        for (i = 0; i < num_states; i++) {
                if (path[i] == tap_state_transition(tap_get_state(), false)) {
                        err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
                } else if (path[i] == tap_state_transition(tap_get_state(), true)) {
                        err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
                } else {
                        LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
                                  tap_state_name(tap_get_state()),
                                  tap_state_name(path[i]));
                        err = ERROR_JTAG_QUEUE_FAILED;
                }
                if (err != ERROR_OK)
                        return err;
                tap_set_state(path[i]);
        }

        tap_set_end_state(tap_get_state());

        return ERROR_OK;
}

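/*
 * Shift an IR or DR scan.  The payload is chunked into 32-bit transactions;
 * only the very last bit of the scan is clocked with TMS = 1 so the TAP
 * leaves the shift state, and captured TDO data is written back into the
 * same buffer unless the scan is output-only.
 */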
static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
{
        enum scan_type type = jtag_scan_type(cmd->cmd.scan);
        tap_state_t saved_end_state = cmd->cmd.scan->end_state;
        bool ir_scan = cmd->cmd.scan->ir_scan;
        uint32_t tdi, tms, tdo;
        uint8_t *buf, *rd_ptr;
        int err, scan_size;
        size_t write;
        size_t left;

        scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);
        rd_ptr = buf;
        LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
                  (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
                  tap_state_name(tap_get_state()),
                  tap_state_name(cmd->cmd.scan->end_state));

        /* If we're in TAP_DR_SHIFT state but need to do a IR_SCAN or
         * vice-versa, do a statemove to corresponding other state, then restore
         * end state
         */
        if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
                tap_set_end_state(TAP_IRSHIFT);
                err = xlnx_pcie_xvc_execute_statemove(0);
                if (err != ERROR_OK)
                        goto out_err;
                tap_set_end_state(saved_end_state);
        } else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
                tap_set_end_state(TAP_DRSHIFT);
                err = xlnx_pcie_xvc_execute_statemove(0);
                if (err != ERROR_OK)
                        goto out_err;
                tap_set_end_state(saved_end_state);
        }

        left = scan_size;
        while (left) {
                write = MIN(XLNX_XVC_MAX_BITS, left);
                /* the last TMS should be a 1, to leave the state */
                tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
                tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
                err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?
                                             &tdo : NULL);
                if (err != ERROR_OK)
                        goto out_err;
                left -= write;
                if (type != SCAN_OUT)
                        buf_set_u32(rd_ptr, 0, write, tdo);
                rd_ptr += sizeof(uint32_t);
        }

        err = jtag_read_buffer(buf, cmd->cmd.scan);
        free(buf);

        if (tap_get_state() != tap_get_end_state())
                err = xlnx_pcie_xvc_execute_statemove(1);

        return err;

out_err:
        free(buf);
        return err;
}

static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
{
        LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
                  cmd->cmd.reset->srst);
}

static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
{
        LOG_DEBUG("sleep %" PRIu32, cmd->cmd.sleep->us);
        usleep(cmd->cmd.sleep->us);
}

static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
{
        const size_t num_bits = cmd->cmd.tms->num_bits;
        const uint8_t *bits = cmd->cmd.tms->bits;
        size_t left, write;
        uint32_t tms;
        int err;

        LOG_DEBUG("execute tms %zu", num_bits);

        left = num_bits;
        while (left) {
                write = MIN(XLNX_XVC_MAX_BITS, left);
                tms = buf_get_u32(bits, 0, write);
                err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
                if (err != ERROR_OK)
                        return err;
                left -= write;
                bits += 4;
        }

        return ERROR_OK;
}

static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
{
        LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
        switch (cmd->type) {
        case JTAG_STABLECLOCKS:
                return xlnx_pcie_xvc_execute_stableclocks(cmd);
        case JTAG_RUNTEST:
                return xlnx_pcie_xvc_execute_runtest(cmd);
        case JTAG_TLR_RESET:
                tap_set_end_state(cmd->cmd.statemove->end_state);
                return xlnx_pcie_xvc_execute_statemove(0);
        case JTAG_PATHMOVE:
                return xlnx_pcie_xvc_execute_pathmove(cmd);
        case JTAG_SCAN:
                return xlnx_pcie_xvc_execute_scan(cmd);
        case JTAG_RESET:
                xlnx_pcie_xvc_execute_reset(cmd);
                break;
        case JTAG_SLEEP:
                xlnx_pcie_xvc_execute_sleep(cmd);
                break;
        case JTAG_TMS:
                return xlnx_pcie_xvc_execute_tms(cmd);
        default:
                LOG_ERROR("BUG: Unknown JTAG command type encountered.");
                return ERROR_JTAG_QUEUE_FAILED;
        }

        return ERROR_OK;
}

static int xlnx_pcie_xvc_execute_queue(void)
{
        struct jtag_command *cmd = jtag_command_queue;
        int ret;

        while (cmd) {
                ret = xlnx_pcie_xvc_execute_command(cmd);

                if (ret != ERROR_OK)
                        return ret;

                cmd = cmd->next;
        }

        return ERROR_OK;
}

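/*
 * Locate the Xilinx XVC/PCIe debug bridge: open the device's PCI config space
 * via sysfs and walk the PCIe extended capability list (starting at 0x100)
 * until a vendor specific capability with the expected VSEC ID and length is
 * found.
 */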
static int xlnx_pcie_xvc_init(void)
{
        char filename[PATH_MAX];
        uint32_t cap, vh;
        int err;

        snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
                 xlnx_pcie_xvc->device);
        xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
        if (xlnx_pcie_xvc->fd < 0) {
                LOG_ERROR("Failed to open device: %s", filename);
                return ERROR_JTAG_INIT_FAILED;
        }

        LOG_INFO("Scanning PCIe device %s for Xilinx XVC/PCIe ...",
                 xlnx_pcie_xvc->device);
        /* Parse the PCIe extended capability list and try to find
         * vendor specific header */
        xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
        while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
               xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
                err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);
                if (err != ERROR_OK)
                        return err;
                LOG_DEBUG("Checking capability at 0x%x; id=0x%04" PRIx32 " version=0x%" PRIx32 " next=0x%" PRIx32,
                          xlnx_pcie_xvc->offset,
                          PCI_EXT_CAP_ID(cap),
                          PCI_EXT_CAP_VER(cap),
                          PCI_EXT_CAP_NEXT(cap));
                if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
                        err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);
                        if (err != ERROR_OK)
                                return err;
                        LOG_DEBUG("Checking possible match at 0x%x; id: 0x%" PRIx32 "; rev: 0x%" PRIx32 "; length: 0x%" PRIx32,
                                  xlnx_pcie_xvc->offset,
                                  PCI_VNDR_HEADER_ID(vh),
                                  PCI_VNDR_HEADER_REV(vh),
                                  PCI_VNDR_HEADER_LEN(vh));
                        if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
                            (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))
                                break;
                }
                xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);
        }
        if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
             xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
                close(xlnx_pcie_xvc->fd);
                return ERROR_JTAG_INIT_FAILED;
        }

        LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);

        return ERROR_OK;
}

static int xlnx_pcie_xvc_quit(void)
{
        int err;

        err = close(xlnx_pcie_xvc->fd);
        if (err)
                return err;

        return ERROR_OK;
}

COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
{
        if (CMD_ARGC < 1)
                return ERROR_COMMAND_SYNTAX_ERROR;

        /* we can't really free this in a safe manner, so at least
         * limit the memory we're leaking by freeing the old one first
         * before allocating a new one ...
         */
        free(xlnx_pcie_xvc->device);

        xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
        return ERROR_OK;
}
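
/*
 * Typical usage from an OpenOCD configuration file (example only; the PCI
 * address below is a placeholder for the bus/device/function of the actual
 * XVC-capable endpoint):
 *
 *   adapter driver xlnx_pcie_xvc
 *   xlnx_pcie_xvc config 0000:03:00.0
 *   transport select jtag
 */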

static const struct command_registration xlnx_pcie_xvc_subcommand_handlers[] = {
        {
                .name = "config",
                .handler = xlnx_pcie_xvc_handle_config_command,
                .mode = COMMAND_CONFIG,
                .help = "Configure XVC/PCIe JTAG adapter",
                .usage = "device",
        },
        COMMAND_REGISTRATION_DONE
};

static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
        {
                .name = "xlnx_pcie_xvc",
                .mode = COMMAND_ANY,
                .help = "perform xlnx_pcie_xvc management",
                .chain = xlnx_pcie_xvc_subcommand_handlers,
                .usage = "",
        },
        COMMAND_REGISTRATION_DONE
};

static struct jtag_interface xlnx_pcie_xvc_jtag_ops = {
        .execute_queue = &xlnx_pcie_xvc_execute_queue,
};

static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq, size_t length)
{
        size_t left, write;
        uint32_t send;
        int err;

        left = length;
        while (left) {
                write = MIN(XLNX_XVC_MAX_BITS, left);
                send = buf_get_u32(seq, 0, write);
                err = xlnx_pcie_xvc_transact(write, send, 0, NULL);
                if (err != ERROR_OK)
                        return err;
                left -= write;
                seq += sizeof(uint32_t);
        }

        return ERROR_OK;
}

static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq)
{
        switch (seq) {
        case LINE_RESET:
                LOG_DEBUG("SWD line reset");
                return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset,
                                                  swd_seq_line_reset_len);
        case JTAG_TO_SWD:
                LOG_DEBUG("JTAG-to-SWD");
                return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd,
                                                  swd_seq_jtag_to_swd_len);
        case SWD_TO_JTAG:
                LOG_DEBUG("SWD-to-JTAG");
                return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag,
                                                  swd_seq_swd_to_jtag_len);
        default:
                LOG_ERROR("Sequence %d not supported", seq);
                return ERROR_FAIL;
        }

        return ERROR_OK;
}

static int queued_retval;

static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
                                        uint32_t ap_delay_clk);

static void swd_clear_sticky_errors(void)
{
        xlnx_pcie_xvc_swd_write_reg(swd_cmd(false, false, DP_ABORT),
                STKCMPCLR | STKERRCLR | WDERRCLR | ORUNERRCLR, 0);
}

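/*
 * SWD transactions are bit-banged through the same shifter, driving SWDIO via
 * the TMS word.  A read shifts the 8-bit request plus turnaround and 3-bit
 * ACK (12 bits total), then 32 data bits, then parity and turnaround;
 * MASK_ACK() and MASK_PAR() extract the ACK field and parity bit from the
 * captured words.
 */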
static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd, uint32_t *value,
                                       uint32_t ap_delay_clk)
{
        uint32_t res, ack, rpar;
        int err;

        assert(cmd & SWD_CMD_RNW);

        cmd |= SWD_CMD_START | SWD_CMD_PARK;
        /* cmd + trn + ack */
        err = xlnx_pcie_xvc_transact(12, cmd, 0, &res);
        if (err != ERROR_OK)
                goto err_out;

        ack = MASK_ACK(res);

        /* read data */
        err = xlnx_pcie_xvc_transact(32, 0, 0, &res);
        if (err != ERROR_OK)
                goto err_out;

        /* parity + trn */
        err = xlnx_pcie_xvc_transact(2, 0, 0, &rpar);
        if (err != ERROR_OK)
                goto err_out;

        LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
                  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
                  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
                  cmd & SWD_CMD_APNDP ? "AP" : "DP",
                  cmd & SWD_CMD_RNW ? "read" : "write",
                  (cmd & SWD_CMD_A32) >> 1,
                  res);
        switch (ack) {
        case SWD_ACK_OK:
                if (MASK_PAR(rpar) != parity_u32(res)) {
                        LOG_DEBUG_IO("Wrong parity detected");
                        queued_retval = ERROR_FAIL;
                        return;
                }
                if (value)
                        *value = res;
                if (cmd & SWD_CMD_APNDP)
                        err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
                queued_retval = err;
                return;
        case SWD_ACK_WAIT:
                LOG_DEBUG_IO("SWD_ACK_WAIT");
                swd_clear_sticky_errors();
                return;
        case SWD_ACK_FAULT:
                LOG_DEBUG_IO("SWD_ACK_FAULT");
                queued_retval = ack;
                return;
        default:
                LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
                queued_retval = ack;
                return;
        }
err_out:
        queued_retval = err;
}

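/*
 * A write shifts the 8-bit request, turnaround, 3-bit ACK and a second
 * turnaround (13 bits total), then the 32 data bits followed by their parity
 * bit.
 */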
static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
                                        uint32_t ap_delay_clk)
{
        uint32_t res, ack;
        int err;

        assert(!(cmd & SWD_CMD_RNW));

        cmd |= SWD_CMD_START | SWD_CMD_PARK;
        /* cmd + trn + ack */
        err = xlnx_pcie_xvc_transact(13, cmd, 0, &res);
        if (err != ERROR_OK)
                goto err_out;

        ack = MASK_ACK(res);

        /* write data */
        err = xlnx_pcie_xvc_transact(32, value, 0, NULL);
        if (err != ERROR_OK)
                goto err_out;

        /* parity + trn */
        err = xlnx_pcie_xvc_transact(2, parity_u32(value), 0, NULL);
        if (err != ERROR_OK)
                goto err_out;

        LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
                  ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
                  "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
                  cmd & SWD_CMD_APNDP ? "AP" : "DP",
                  cmd & SWD_CMD_RNW ? "read" : "write",
                  (cmd & SWD_CMD_A32) >> 1,
                  value);

        switch (ack) {
        case SWD_ACK_OK:
                if (cmd & SWD_CMD_APNDP)
                        err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
                queued_retval = err;
                return;
        case SWD_ACK_WAIT:
                LOG_DEBUG_IO("SWD_ACK_WAIT");
                swd_clear_sticky_errors();
                return;
        case SWD_ACK_FAULT:
                LOG_DEBUG_IO("SWD_ACK_FAULT");
                queued_retval = ack;
                return;
        default:
                LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
                queued_retval = ack;
                return;
        }

err_out:
        queued_retval = err;
}

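/*
 * read_reg/write_reg record their completion status in queued_retval;
 * run_queue clocks the eight idle cycles wanted between transactions, then
 * returns that status and resets it to ERROR_OK.
 */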
static int xlnx_pcie_xvc_swd_run_queue(void)
{
        int err;

        /* we want at least 8 idle cycles between each transaction */
        err = xlnx_pcie_xvc_transact(8, 0, 0, NULL);
        if (err != ERROR_OK)
                return err;

        err = queued_retval;
        queued_retval = ERROR_OK;
        LOG_DEBUG("SWD queue return value: %02x", err);

        return err;
}

static int xlnx_pcie_xvc_swd_init(void)
{
        return ERROR_OK;
}

static const struct swd_driver xlnx_pcie_xvc_swd_ops = {
        .init = xlnx_pcie_xvc_swd_init,
        .switch_seq = xlnx_pcie_xvc_swd_switch_seq,
        .read_reg = xlnx_pcie_xvc_swd_read_reg,
        .write_reg = xlnx_pcie_xvc_swd_write_reg,
        .run = xlnx_pcie_xvc_swd_run_queue,
};

static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };

struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
        .name = "xlnx_pcie_xvc",
        .transports = xlnx_pcie_xvc_transports,
        .commands = xlnx_pcie_xvc_command_handlers,

        .init = &xlnx_pcie_xvc_init,
        .quit = &xlnx_pcie_xvc_quit,

        .jtag_ops = &xlnx_pcie_xvc_jtag_ops,
        .swd_ops  = &xlnx_pcie_xvc_swd_ops,
};