fw/openocd: src/jtag/drivers/xlnx-pcie-xvc.c (commit c05b9cf4abf5e0e633be763d4142506377491bb7)
1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * Copyright (c) 2019 Google, LLC.
4  * Author: Moritz Fischer <moritzf@google.com>
5  */
6
7 #ifdef HAVE_CONFIG_H
8 #include "config.h"
9 #endif
10
11 #include <stdint.h>
12 #include <stdlib.h>
13 #include <math.h>
14 #include <unistd.h>
15 #include <linux/pci.h>
16
17 #include <jtag/interface.h>
18 #include <jtag/swd.h>
19 #include <jtag/commands.h>
20 #include <helper/replacements.h>
21 #include <helper/bits.h>
22
23 /* Available only from kernel v4.10 */
24 #ifndef PCI_CFG_SPACE_EXP_SIZE
25 #define PCI_CFG_SPACE_EXP_SIZE  4096
26 #endif
27
28 #define PCIE_EXT_CAP_LST        0x100
29
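/* Layout of the Xilinx XVC vendor-specific capability (VSEC): the extended
 * capability header and VSEC header are followed by a bit-length register
 * and the TMS/TDX shift registers used to clock JTAG transactions.
 */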
30 #define XLNX_XVC_EXT_CAP        0x00
31 #define XLNX_XVC_VSEC_HDR       0x04
32 #define XLNX_XVC_LEN_REG        0x0C
33 #define XLNX_XVC_TMS_REG        0x10
34 #define XLNX_XVC_TDX_REG        0x14
35
36 #define XLNX_XVC_CAP_SIZE       0x20
37 #define XLNX_XVC_VSEC_ID        0x8
38 #define XLNX_XVC_MAX_BITS       0x20
39
40 #define MASK_ACK(x) (((x) >> 9) & 0x7)
41 #define MASK_PAR(x) ((int)((x) & 0x1))
42
43 struct xlnx_pcie_xvc {
44         int fd;
45         unsigned offset;
46         char *device;
47 };
48
49 static struct xlnx_pcie_xvc xlnx_pcie_xvc_state;
50 static struct xlnx_pcie_xvc *xlnx_pcie_xvc = &xlnx_pcie_xvc_state;
51
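/* Register accessors: the XVC registers live in the device's PCI config
 * space, so reads and writes go through the sysfs "config" file opened in
 * xlnx_pcie_xvc_init(), at the VSEC offset found during the capability scan.
 */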
52 static int xlnx_pcie_xvc_read_reg(const int offset, uint32_t *val)
53 {
54         uint32_t res;
55         int err;
56
57         /* Note: This should be ok endianness-wise because by going
58          * through sysfs the kernel does the conversion in the config
59          * space accessor functions
60          */
61         err = pread(xlnx_pcie_xvc->fd, &res, sizeof(res),
62                     xlnx_pcie_xvc->offset + offset);
63         if (err != sizeof(res)) {
64                 LOG_ERROR("Failed to read offset %x", offset);
65                 return ERROR_JTAG_DEVICE_ERROR;
66         }
67
68         if (val)
69                 *val = res;
70
71         return ERROR_OK;
72 }
73
74 static int xlnx_pcie_xvc_write_reg(const int offset, const uint32_t val)
75 {
76         int err;
77
78         /* Note: This should be ok endianness-wise because by going
79          * through sysfs the kernel does the conversion in the config
80          * space accessor functions
81          */
82         err = pwrite(xlnx_pcie_xvc->fd, &val, sizeof(val),
83                      xlnx_pcie_xvc->offset + offset);
84         if (err != sizeof(val)) {
85                 LOG_ERROR("Failed to write offset: %x with value: %" PRIx32,
86                           offset, val);
87                 return ERROR_JTAG_DEVICE_ERROR;
88         }
89
90         return ERROR_OK;
91 }
92
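/* Perform one XVC shift of up to XLNX_XVC_MAX_BITS (32) bits: program the
 * bit count into the length register, write the TMS and TDI words, then
 * read back the captured TDO word (if the caller asked for it).
 */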
93 static int xlnx_pcie_xvc_transact(size_t num_bits, uint32_t tms, uint32_t tdi,
94                                   uint32_t *tdo)
95 {
96         int err;
97
98         err = xlnx_pcie_xvc_write_reg(XLNX_XVC_LEN_REG, num_bits);
99         if (err != ERROR_OK)
100                 return err;
101
102         err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TMS_REG, tms);
103         if (err != ERROR_OK)
104                 return err;
105
106         err = xlnx_pcie_xvc_write_reg(XLNX_XVC_TDX_REG, tdi);
107         if (err != ERROR_OK)
108                 return err;
109
110         err = xlnx_pcie_xvc_read_reg(XLNX_XVC_TDX_REG, tdo);
111         if (err != ERROR_OK)
112                 return err;
113
114         if (tdo)
115                 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: %" PRIx32,
116                              num_bits, tms, tdi, *tdo);
117         else
118                 LOG_DEBUG_IO("Transact num_bits: %zu, tms: %" PRIx32 ", tdi: %" PRIx32 ", tdo: <null>",
119                              num_bits, tms, tdi);
120         return ERROR_OK;
121 }
122
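/* Clock out idle cycles in chunks of up to 32 bits, holding TMS high while
 * the TAP is in TAP_RESET so it stays there, and low otherwise.
 */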
123 static int xlnx_pcie_xvc_execute_stableclocks(struct jtag_command *cmd)
124 {
125         int tms = tap_get_state() == TAP_RESET ? 1 : 0;
126         size_t left = cmd->cmd.stableclocks->num_cycles;
127         size_t write;
128         int err;
129
130         LOG_DEBUG("stableclocks %i cycles", cmd->cmd.stableclocks->num_cycles);
131
132         while (left) {
133                 write = MIN(XLNX_XVC_MAX_BITS, left);
134                 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
135                 if (err != ERROR_OK)
136                         return err;
137                 left -= write;
138         }
139
140         return ERROR_OK;
141 }
142
143 static int xlnx_pcie_xvc_execute_statemove(size_t skip)
144 {
145         uint8_t tms_scan = tap_get_tms_path(tap_get_state(),
146                                             tap_get_end_state());
147         int tms_count = tap_get_tms_path_len(tap_get_state(),
148                                              tap_get_end_state());
149         int err;
150
151         LOG_DEBUG("statemove starting at (skip: %zu) %s end in %s", skip,
152                   tap_state_name(tap_get_state()),
153                   tap_state_name(tap_get_end_state()));
154
156         err = xlnx_pcie_xvc_transact(tms_count - skip, tms_scan >> skip, 0, NULL);
157         if (err != ERROR_OK)
158                 return err;
159
160         tap_set_state(tap_get_end_state());
161
162         return ERROR_OK;
163 }
164
165 static int xlnx_pcie_xvc_execute_runtest(struct jtag_command *cmd)
166 {
167         int err = ERROR_OK;
168
169         LOG_DEBUG("runtest %i cycles, end in %i",
170                   cmd->cmd.runtest->num_cycles,
171                   cmd->cmd.runtest->end_state);
172
173         tap_state_t tmp_state = tap_get_end_state();
174
175         if (tap_get_state() != TAP_IDLE) {
176                 tap_set_end_state(TAP_IDLE);
177                 err = xlnx_pcie_xvc_execute_statemove(0);
178                 if (err != ERROR_OK)
179                         return err;
180         }
181
182         size_t left = cmd->cmd.runtest->num_cycles;
183         size_t write;
184
185         while (left) {
186                 write = MIN(XLNX_XVC_MAX_BITS, left);
187                 err = xlnx_pcie_xvc_transact(write, 0, 0, NULL);
188                 if (err != ERROR_OK)
189                         return err;
190                 left -= write;
191         }
192
193         tap_set_end_state(tmp_state);
194         if (tap_get_state() != tap_get_end_state())
195                 err = xlnx_pcie_xvc_execute_statemove(0);
196
197         return err;
198 }
199
200 static int xlnx_pcie_xvc_execute_pathmove(struct jtag_command *cmd)
201 {
202         size_t num_states = cmd->cmd.pathmove->num_states;
203         tap_state_t *path = cmd->cmd.pathmove->path;
204         int err = ERROR_OK;
205         size_t i;
206
207         LOG_DEBUG("pathmove: %i states, end in %i",
208                   cmd->cmd.pathmove->num_states,
209                   cmd->cmd.pathmove->path[cmd->cmd.pathmove->num_states - 1]);
210
211         for (i = 0; i < num_states; i++) {
212                 if (path[i] == tap_state_transition(tap_get_state(), false)) {
213                         err = xlnx_pcie_xvc_transact(1, 1, 0, NULL);
214                 } else if (path[i] == tap_state_transition(tap_get_state(), true)) {
215                         err = xlnx_pcie_xvc_transact(1, 0, 0, NULL);
216                 } else {
217                         LOG_ERROR("BUG: %s -> %s isn't a valid TAP transition.",
218                                   tap_state_name(tap_get_state()),
219                                   tap_state_name(path[i]));
220                         err = ERROR_JTAG_QUEUE_FAILED;
221                 }
222                 if (err != ERROR_OK)
223                         return err;
224                 tap_set_state(path[i]);
225         }
226
227         tap_set_end_state(tap_get_state());
228
229         return ERROR_OK;
230 }
231
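/* Run an IR or DR scan: move to the corresponding shift state if necessary,
 * shift the buffer in 32-bit chunks (raising TMS on the final bit to leave
 * the shift state), collect TDO into the same buffer, then move to the
 * requested end state.
 */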
232 static int xlnx_pcie_xvc_execute_scan(struct jtag_command *cmd)
233 {
234         enum scan_type type = jtag_scan_type(cmd->cmd.scan);
235         tap_state_t saved_end_state = cmd->cmd.scan->end_state;
236         bool ir_scan = cmd->cmd.scan->ir_scan;
237         uint32_t tdi, tms, tdo;
238         uint8_t *buf, *rd_ptr;
239         int err, scan_size;
240         size_t write;
241         size_t left;
242
243         scan_size = jtag_build_buffer(cmd->cmd.scan, &buf);
244         rd_ptr = buf;
245         LOG_DEBUG("%s scan type %d %d bits; starts in %s end in %s",
246                   (cmd->cmd.scan->ir_scan) ? "IR" : "DR", type, scan_size,
247                   tap_state_name(tap_get_state()),
248                   tap_state_name(cmd->cmd.scan->end_state));
249
250         /* If we're in TAP_DRSHIFT state but need to do an IR scan or
251          * vice-versa, do a statemove to the corresponding other state, then
252          * restore the end state
253          */
254         if (ir_scan && tap_get_state() != TAP_IRSHIFT) {
255                 tap_set_end_state(TAP_IRSHIFT);
256                 err = xlnx_pcie_xvc_execute_statemove(0);
257                 if (err != ERROR_OK)
258                         goto out_err;
259                 tap_set_end_state(saved_end_state);
260         } else if (!ir_scan && (tap_get_state() != TAP_DRSHIFT)) {
261                 tap_set_end_state(TAP_DRSHIFT);
262                 err = xlnx_pcie_xvc_execute_statemove(0);
263                 if (err != ERROR_OK)
264                         goto out_err;
265                 tap_set_end_state(saved_end_state);
266         }
267
268         left = scan_size;
269         while (left) {
270                 write = MIN(XLNX_XVC_MAX_BITS, left);
271                 /* the last TMS should be a 1, to leave the state */
272                 tms = left <= XLNX_XVC_MAX_BITS ? BIT(write - 1) : 0;
273                 tdi = (type != SCAN_IN) ? buf_get_u32(rd_ptr, 0, write) : 0;
274                 err = xlnx_pcie_xvc_transact(write, tms, tdi, type != SCAN_OUT ?
275                                              &tdo : NULL);
276                 if (err != ERROR_OK)
277                         goto out_err;
278                 left -= write;
279                 if (type != SCAN_OUT)
280                         buf_set_u32(rd_ptr, 0, write, tdo);
281                 rd_ptr += sizeof(uint32_t);
282         }
283
284         err = jtag_read_buffer(buf, cmd->cmd.scan);
285         free(buf);
286
287         if (tap_get_state() != tap_get_end_state())
288                 err = xlnx_pcie_xvc_execute_statemove(1);
289
290         return err;
291
292 out_err:
293         free(buf);
294         return err;
295 }
296
297 static void xlnx_pcie_xvc_execute_reset(struct jtag_command *cmd)
298 {
299         LOG_DEBUG("reset trst: %i srst: %i", cmd->cmd.reset->trst,
300                   cmd->cmd.reset->srst);
301 }
302
303 static void xlnx_pcie_xvc_execute_sleep(struct jtag_command *cmd)
304 {
305         LOG_DEBUG("sleep %" PRIu32 "", cmd->cmd.sleep->us);
306         usleep(cmd->cmd.sleep->us);
307 }
308
309 static int xlnx_pcie_xvc_execute_tms(struct jtag_command *cmd)
310 {
311         const size_t num_bits = cmd->cmd.tms->num_bits;
312         const uint8_t *bits = cmd->cmd.tms->bits;
313         size_t left, write;
314         uint32_t tms;
315         int err;
316
317         LOG_DEBUG("execute tms %zu", num_bits);
318
319         left = num_bits;
320         while (left) {
321                 write = MIN(XLNX_XVC_MAX_BITS, left);
322                 tms = buf_get_u32(bits, 0, write);
323                 err = xlnx_pcie_xvc_transact(write, tms, 0, NULL);
324                 if (err != ERROR_OK)
325                         return err;
326                 left -= write;
327                 bits += 4;
328         }
329
330         return ERROR_OK;
331 }
332
333 static int xlnx_pcie_xvc_execute_command(struct jtag_command *cmd)
334 {
335         LOG_DEBUG("%s: cmd->type: %u", __func__, cmd->type);
336         switch (cmd->type) {
337         case JTAG_STABLECLOCKS:
338                 return xlnx_pcie_xvc_execute_stableclocks(cmd);
339         case JTAG_RUNTEST:
340                 return xlnx_pcie_xvc_execute_runtest(cmd);
341         case JTAG_TLR_RESET:
342                 tap_set_end_state(cmd->cmd.statemove->end_state);
343                 return xlnx_pcie_xvc_execute_statemove(0);
344         case JTAG_PATHMOVE:
345                 return xlnx_pcie_xvc_execute_pathmove(cmd);
346         case JTAG_SCAN:
347                 return xlnx_pcie_xvc_execute_scan(cmd);
348         case JTAG_RESET:
349                 xlnx_pcie_xvc_execute_reset(cmd);
350                 break;
351         case JTAG_SLEEP:
352                 xlnx_pcie_xvc_execute_sleep(cmd);
353                 break;
354         case JTAG_TMS:
355                 return xlnx_pcie_xvc_execute_tms(cmd);
356         default:
357                 LOG_ERROR("BUG: Unknown JTAG command type encountered.");
358                 return ERROR_JTAG_QUEUE_FAILED;
359         }
360
361         return ERROR_OK;
362 }
363
364 static int xlnx_pcie_xvc_execute_queue(void)
365 {
366         struct jtag_command *cmd = jtag_command_queue;
367         int ret;
368
369         while (cmd) {
370                 ret = xlnx_pcie_xvc_execute_command(cmd);
371
372                 if (ret != ERROR_OK)
373                         return ret;
374
375                 cmd = cmd->next;
376         }
377
378         return ERROR_OK;
379 }
380
381
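/* Open the device's PCI config space via sysfs and walk the PCIe extended
 * capability list (starting at 0x100) looking for a vendor-specific
 * capability whose VSEC ID and length match the Xilinx XVC block.
 */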
382 static int xlnx_pcie_xvc_init(void)
383 {
384         char filename[PATH_MAX];
385         uint32_t cap, vh;
386         int err;
387
388         snprintf(filename, PATH_MAX, "/sys/bus/pci/devices/%s/config",
389                  xlnx_pcie_xvc->device);
390         xlnx_pcie_xvc->fd = open(filename, O_RDWR | O_SYNC);
391         if (xlnx_pcie_xvc->fd < 0) {
392                 LOG_ERROR("Failed to open device: %s", filename);
393                 return ERROR_JTAG_INIT_FAILED;
394         }
395
396         LOG_INFO("Scanning PCIe device %s for Xilinx XVC/PCIe ...",
397                  xlnx_pcie_xvc->device);
398         /* Parse the PCIe extended capability list and try to find
399          * the Xilinx vendor-specific header */
400         xlnx_pcie_xvc->offset = PCIE_EXT_CAP_LST;
401         while (xlnx_pcie_xvc->offset <= PCI_CFG_SPACE_EXP_SIZE - sizeof(cap) &&
402                xlnx_pcie_xvc->offset >= PCIE_EXT_CAP_LST) {
403                 err = xlnx_pcie_xvc_read_reg(XLNX_XVC_EXT_CAP, &cap);
404                 if (err != ERROR_OK)
405                         return err;
406                 LOG_DEBUG("Checking capability at 0x%x; id=0x%04" PRIx32 " version=0x%" PRIx32 " next=0x%" PRIx32,
407                          xlnx_pcie_xvc->offset,
408                          PCI_EXT_CAP_ID(cap),
409                          PCI_EXT_CAP_VER(cap),
410                          PCI_EXT_CAP_NEXT(cap));
411                 if (PCI_EXT_CAP_ID(cap) == PCI_EXT_CAP_ID_VNDR) {
412                         err = xlnx_pcie_xvc_read_reg(XLNX_XVC_VSEC_HDR, &vh);
413                         if (err != ERROR_OK)
414                                 return err;
415                         LOG_DEBUG("Checking possible match at 0x%x; id: 0x%" PRIx32 "; rev: 0x%" PRIx32 "; length: 0x%" PRIx32,
416                                  xlnx_pcie_xvc->offset,
417                                  PCI_VNDR_HEADER_ID(vh),
418                                  PCI_VNDR_HEADER_REV(vh),
419                                  PCI_VNDR_HEADER_LEN(vh));
420                         if ((PCI_VNDR_HEADER_ID(vh) == XLNX_XVC_VSEC_ID) &&
421                             (PCI_VNDR_HEADER_LEN(vh) == XLNX_XVC_CAP_SIZE))
422                                 break;
423                 }
424                 xlnx_pcie_xvc->offset = PCI_EXT_CAP_NEXT(cap);
425         }
426         if ((xlnx_pcie_xvc->offset > PCI_CFG_SPACE_EXP_SIZE - XLNX_XVC_CAP_SIZE) ||
427              xlnx_pcie_xvc->offset < PCIE_EXT_CAP_LST) {
428                 close(xlnx_pcie_xvc->fd);
429                 return ERROR_JTAG_INIT_FAILED;
430         }
431
432         LOG_INFO("Found Xilinx XVC/PCIe capability at offset: 0x%x", xlnx_pcie_xvc->offset);
433
434         return ERROR_OK;
435 }
436
437 static int xlnx_pcie_xvc_quit(void)
438 {
439         int err;
440
441         err = close(xlnx_pcie_xvc->fd);
442         if (err)
443                 return err;
444
445         return ERROR_OK;
446 }
447
448 COMMAND_HANDLER(xlnx_pcie_xvc_handle_config_command)
449 {
450         if (CMD_ARGC < 1)
451                 return ERROR_COMMAND_SYNTAX_ERROR;
452
453         /* we can't really free this in a safe manner, so at least
454          * limit the memory we're leaking by freeing the old one first
455          * before allocating a new one ...
456          */
457         free(xlnx_pcie_xvc->device);
458
459         xlnx_pcie_xvc->device = strdup(CMD_ARGV[0]);
460         return ERROR_OK;
461 }
462
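/* Typical use from an OpenOCD configuration file (the PCI address below is
 * only an example; substitute the bus/device/function of your endpoint):
 *
 *   adapter driver xlnx_pcie_xvc
 *   xlnx_pcie_xvc config 0000:03:00.0
 */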
463 static const struct command_registration xlnx_pcie_xvc_subcommand_handlers[] = {
464         {
465                 .name = "config",
466                 .handler = xlnx_pcie_xvc_handle_config_command,
467                 .mode = COMMAND_CONFIG,
468                 .help = "Configure XVC/PCIe JTAG adapter",
469                 .usage = "device",
470         },
471         COMMAND_REGISTRATION_DONE
472 };
473
474 static const struct command_registration xlnx_pcie_xvc_command_handlers[] = {
475         {
476                 .name = "xlnx_pcie_xvc",
477                 .mode = COMMAND_ANY,
478                 .help = "perform xlnx_pcie_xvc management",
479                 .chain = xlnx_pcie_xvc_subcommand_handlers,
480                 .usage = "",
481         },
482         COMMAND_REGISTRATION_DONE
483 };
484
485 static struct jtag_interface xlnx_pcie_xvc_jtag_ops = {
486         .execute_queue = &xlnx_pcie_xvc_execute_queue,
487 };
488
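/* Clock out a raw SWD bit sequence on the TMS line, 32 bits at a time; used
 * for the special switch sequences below.
 */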
489 static int xlnx_pcie_xvc_swd_sequence(const uint8_t *seq, size_t length)
490 {
491         size_t left, write;
492         uint32_t send;
493         int err;
494
495         left = length;
496         while (left) {
497                 write = MIN(XLNX_XVC_MAX_BITS, left);
498                 send = buf_get_u32(seq, 0, write);
499                 err = xlnx_pcie_xvc_transact(write, send, 0, NULL);
500                 if (err != ERROR_OK)
501                         return err;
502                 left -= write;
503                 seq += sizeof(uint32_t);
504         }
505
506         return ERROR_OK;
507 }
508
509 static int xlnx_pcie_xvc_swd_switch_seq(enum swd_special_seq seq)
510 {
511         switch (seq) {
512         case LINE_RESET:
513                 LOG_DEBUG("SWD line reset");
514                 return xlnx_pcie_xvc_swd_sequence(swd_seq_line_reset,
515                                                   swd_seq_line_reset_len);
516         case JTAG_TO_SWD:
517                 LOG_DEBUG("JTAG-to-SWD");
518                 return xlnx_pcie_xvc_swd_sequence(swd_seq_jtag_to_swd,
519                                                   swd_seq_jtag_to_swd_len);
520         case SWD_TO_JTAG:
521                 LOG_DEBUG("SWD-to-JTAG");
522                 return xlnx_pcie_xvc_swd_sequence(swd_seq_swd_to_jtag,
523                                                   swd_seq_swd_to_jtag_len);
524         default:
525                 LOG_ERROR("Sequence %d not supported", seq);
526                 return ERROR_FAIL;
527         }
528
529         return ERROR_OK;
530 }
531
532 static int queued_retval;
533
534 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
535                                         uint32_t ap_delay_clk);
536
537 static void swd_clear_sticky_errors(void)
538 {
539         xlnx_pcie_xvc_swd_write_reg(swd_cmd(false,  false, DP_ABORT),
540                 STKCMPCLR | STKERRCLR | WDERRCLR | ORUNERRCLR, 0);
541 }
542
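/* SWD transactions are bit-banged through the same TMS/TDO shift registers:
 * a read clocks out the 8-bit request plus turnaround and samples the 3-bit
 * ACK (12 bits total), then 32 data bits, then parity and turnaround.
 * Results are deferred into queued_retval until the queue is run.
 */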
543 static void xlnx_pcie_xvc_swd_read_reg(uint8_t cmd, uint32_t *value,
544                                        uint32_t ap_delay_clk)
545 {
546         uint32_t res, ack, rpar;
547         int err;
548
549         assert(cmd & SWD_CMD_RNW);
550
551         cmd |= SWD_CMD_START | SWD_CMD_PARK;
552         /* cmd + ack */
553         err = xlnx_pcie_xvc_transact(12, cmd, 0, &res);
554         if (err != ERROR_OK)
555                 goto err_out;
556
557         ack = MASK_ACK(res);
558
559         /* read data */
560         err = xlnx_pcie_xvc_transact(32, 0, 0, &res);
561         if (err != ERROR_OK)
562                 goto err_out;
563
564         /* parity + trn */
565         err = xlnx_pcie_xvc_transact(2, 0, 0, &rpar);
566         if (err != ERROR_OK)
567                 goto err_out;
568
569         LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
570                   ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
571                   "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
572                   cmd & SWD_CMD_APNDP ? "AP" : "DP",
573                   cmd & SWD_CMD_RNW ? "read" : "write",
574                   (cmd & SWD_CMD_A32) >> 1,
575                   res);
576         switch (ack) {
577         case SWD_ACK_OK:
578                 if (MASK_PAR(rpar) != parity_u32(res)) {
579                         LOG_DEBUG_IO("Wrong parity detected");
580                         queued_retval = ERROR_FAIL;
581                         return;
582                 }
583                 if (value)
584                         *value = res;
585                 if (cmd & SWD_CMD_APNDP)
586                         err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
587                 queued_retval = err;
588                 return;
589         case SWD_ACK_WAIT:
590                 LOG_DEBUG_IO("SWD_ACK_WAIT");
591                 swd_clear_sticky_errors();
592                 return;
593         case SWD_ACK_FAULT:
594                 LOG_DEBUG_IO("SWD_ACK_FAULT");
595                 queued_retval = ack;
596                 return;
597         default:
598                 LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
599                 queued_retval = ack;
600                 return;
601         }
602 err_out:
603         queued_retval = err;
604 }
605
606 static void xlnx_pcie_xvc_swd_write_reg(uint8_t cmd, uint32_t value,
607                                         uint32_t ap_delay_clk)
608 {
609         uint32_t res, ack;
610         int err;
611
612         assert(!(cmd & SWD_CMD_RNW));
613
614         cmd |= SWD_CMD_START | SWD_CMD_PARK;
615         /* cmd + trn + ack */
616         err = xlnx_pcie_xvc_transact(13, cmd, 0, &res);
617         if (err != ERROR_OK)
618                 goto err_out;
619
620         ack = MASK_ACK(res);
621
622         /* write data */
623         err = xlnx_pcie_xvc_transact(32, value, 0, NULL);
624         if (err != ERROR_OK)
625                 goto err_out;
626
627         /* parity + trn */
628         err = xlnx_pcie_xvc_transact(2, parity_u32(value), 0, NULL);
629         if (err != ERROR_OK)
630                 goto err_out;
631
632         LOG_DEBUG("%s %s %s reg %X = %08"PRIx32,
633                   ack == SWD_ACK_OK ? "OK" : ack == SWD_ACK_WAIT ?
634                   "WAIT" : ack == SWD_ACK_FAULT ? "FAULT" : "JUNK",
635                   cmd & SWD_CMD_APNDP ? "AP" : "DP",
636                   cmd & SWD_CMD_RNW ? "read" : "write",
637                   (cmd & SWD_CMD_A32) >> 1,
638                   value);
639
640         switch (ack) {
641         case SWD_ACK_OK:
642                 if (cmd & SWD_CMD_APNDP)
643                         err = xlnx_pcie_xvc_transact(ap_delay_clk, 0, 0, NULL);
644                 queued_retval = err;
645                 return;
646         case SWD_ACK_WAIT:
647                 LOG_DEBUG_IO("SWD_ACK_WAIT");
648                 swd_clear_sticky_errors();
649                 return;
650         case SWD_ACK_FAULT:
651                 LOG_DEBUG_IO("SWD_ACK_FAULT");
652                 queued_retval = ack;
653                 return;
654         default:
655                 LOG_DEBUG_IO("No valid acknowledge: ack=%02"PRIx32, ack);
656                 queued_retval = ack;
657                 return;
658         }
659
660 err_out:
661         queued_retval = err;
662 }
663
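/* Flush the queued SWD result: insert the required idle cycles, then return
 * (and clear) the status recorded by the preceding read/write calls.
 */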
664 static int xlnx_pcie_xvc_swd_run_queue(void)
665 {
666         int err;
667
668         /* we want at least 8 idle cycles between each transaction */
669         err = xlnx_pcie_xvc_transact(8, 0, 0, NULL);
670         if (err != ERROR_OK)
671                 return err;
672
673         err = queued_retval;
674         queued_retval = ERROR_OK;
675         LOG_DEBUG("SWD queue return value: %02x", err);
676
677         return err;
678 }
679
680 static int xlnx_pcie_xvc_swd_init(void)
681 {
682         return ERROR_OK;
683 }
684
685 static const struct swd_driver xlnx_pcie_xvc_swd_ops = {
686         .init = xlnx_pcie_xvc_swd_init,
687         .switch_seq = xlnx_pcie_xvc_swd_switch_seq,
688         .read_reg = xlnx_pcie_xvc_swd_read_reg,
689         .write_reg = xlnx_pcie_xvc_swd_write_reg,
690         .run = xlnx_pcie_xvc_swd_run_queue,
691 };
692
693 static const char * const xlnx_pcie_xvc_transports[] = { "jtag", "swd", NULL };
694
695 struct adapter_driver xlnx_pcie_xvc_adapter_driver = {
696         .name = "xlnx_pcie_xvc",
697         .transports = xlnx_pcie_xvc_transports,
698         .commands = xlnx_pcie_xvc_command_handlers,
699
700         .init = &xlnx_pcie_xvc_init,
701         .quit = &xlnx_pcie_xvc_quit,
702
703         .jtag_ops = &xlnx_pcie_xvc_jtag_ops,
704         .swd_ops  = &xlnx_pcie_xvc_swd_ops,
705 };