1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2006 by Magnus Lundin *
7 * Copyright (C) 2008 by Spencer Oliver *
8 * spen@spen-soft.co.uk *
10 * Copyright (C) 2009-2010 by Oyvind Harboe *
11 * oyvind.harboe@zylin.com *
13 * Copyright (C) 2009-2010 by David Brownell *
15 * Copyright (C) 2013 by Andreas Fritiofson *
16 * andreas.fritiofson@gmail.com *
18 * Copyright (C) 2019-2021, Ampere Computing LLC *
19 ***************************************************************************/
23 * This file implements support for the ARM Debug Interface version 5 (ADIv5)
24 * debugging architecture. Compared with previous versions, this includes
25 * a low pin-count Serial Wire Debug (SWD) alternative to JTAG for message
26 * transport, and focuses on memory mapped resources as defined by the
27 * CoreSight architecture.
29 * A key concept in ADIv5 is the Debug Access Port, or DAP. A DAP has two
30 * basic components: a Debug Port (DP) transporting messages to and from a
31 * debugger, and an Access Port (AP) accessing resources. Three types of DP
32 * are defined. One uses only JTAG for communication, and is called JTAG-DP.
33 * One uses only SWD for communication, and is called SW-DP. The third can
34 * use either SWD or JTAG, and is called SWJ-DP. The most common type of AP
35 * is used to access memory mapped resources and is called a MEM-AP. A
36 * JTAG-AP is also defined, bridging to JTAG resources; those are uncommon.
38 * This programming interface allows DAP pipelined operations through a
39 * transaction queue. This primarily affects AP operations (such as using
40 * a MEM-AP to access memory or registers). If the current transaction has
41 * not finished by the time the next one must begin, and the ORUNDETECT bit
42 * is set in the DP_CTRL_STAT register, the SSTICKYORUN status is set and
43 * further AP operations will fail. There are two basic methods to avoid
44 * such overrun errors. One involves polling for status instead of using
45 * transaction pipelining. The other involves adding delays to ensure the
46 * AP has enough time to complete one operation before starting the next
47 * one. (For JTAG these delays are controlled by memaccess_tck.)
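 *
 * Illustrative sketch only (assuming a valid struct adiv5_ap *ap and a
 * readable word-aligned address 'base'): queue a few operations, then flush
 * the queue once and check the overall result.
 *
 *	uint32_t r0, r1;
 *	int retval = mem_ap_read_u32(ap, base + 0x0, &r0);
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_read_u32(ap, base + 0x4, &r1);
 *	if (retval == ERROR_OK)
 *		retval = dap_run(ap->dap);
 *
 * Only after dap_run() returns ERROR_OK are r0 and r1 valid.
 *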
51 * Relevant specifications from ARM include:
53 * ARM(tm) Debug Interface v5 Architecture Specification ARM IHI 0031E
54 * CoreSight(tm) v1.0 Architecture Specification ARM IHI 0029B
56 * CoreSight(tm) DAP-Lite TRM, ARM DDI 0316D
57 * Cortex-M3(tm) TRM, ARM DDI 0337G
64 #include "jtag/interface.h"
66 #include "arm_adi_v5.h"
67 #include "arm_coresight.h"
69 #include "transport/transport.h"
70 #include <helper/align.h>
71 #include <helper/jep106.h>
72 #include <helper/time_support.h>
73 #include <helper/list.h>
74 #include <helper/jim-nvp.h>
76 /* ARM ADI Specification requires at least 10 bits used for TAR autoincrement */
79 /* uint32_t tar_block_size(uint32_t address)
80  * Return the largest block starting at address that does not cross a tar block size alignment boundary */
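/* Worked example (informative): with tar_autoincr_block = 0x400 and
 * address = 0x20000384, ((0x400 - 1) & address) = 0x384, so the result is
 * 0x400 - 0x384 = 0x7c bytes left before the next 1 KiB boundary.
 */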
82 static uint32_t max_tar_block_size(uint32_t tar_autoincr_block, target_addr_t address)
84 return tar_autoincr_block - ((tar_autoincr_block - 1) & address);
87 /***************************************************************************
89 * DP and MEM-AP register access through APACC and DPACC *
91 ***************************************************************************/
93 static int mem_ap_setup_csw(struct adiv5_ap *ap, uint32_t csw)
95 csw |= ap->csw_default;
97 if (csw != ap->csw_value) {
98 /* LOG_DEBUG("DAP: Set CSW %x",csw); */
99 int retval = dap_queue_ap_write(ap, MEM_AP_REG_CSW(ap->dap), csw);
100 if (retval != ERROR_OK) {
109 static int mem_ap_setup_tar(struct adiv5_ap *ap, target_addr_t tar)
111 if (!ap->tar_valid || tar != ap->tar_value) {
112 /* LOG_DEBUG("DAP: Set TAR %x",tar); */
113 int retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR(ap->dap), (uint32_t)(tar & 0xffffffffUL));
114 if (retval == ERROR_OK && is_64bit_ap(ap)) {
115 /* See if bits 63:32 of tar differ from the last setting */
116 if ((ap->tar_value >> 32) != (tar >> 32))
117 retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR64(ap->dap), (uint32_t)(tar >> 32));
119 if (retval != ERROR_OK) {
120 ap->tar_valid = false;
124 ap->tar_valid = true;
129 static int mem_ap_read_tar(struct adiv5_ap *ap, target_addr_t *tar)
134 int retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR(ap->dap), &lower);
135 if (retval == ERROR_OK && is_64bit_ap(ap))
136 retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR64(ap->dap), &upper);
138 if (retval != ERROR_OK) {
139 ap->tar_valid = false;
143 retval = dap_run(ap->dap);
144 if (retval != ERROR_OK) {
145 ap->tar_valid = false;
149 *tar = (((target_addr_t)upper) << 32) | (target_addr_t)lower;
151 ap->tar_value = *tar;
152 ap->tar_valid = true;
156 static uint32_t mem_ap_get_tar_increment(struct adiv5_ap *ap)
158 switch (ap->csw_value & CSW_ADDRINC_MASK) {
159 case CSW_ADDRINC_SINGLE:
160 switch (ap->csw_value & CSW_SIZE_MASK) {
170 case CSW_ADDRINC_PACKED:
176 /* mem_ap_update_tar_cache is called after an access to MEM_AP_REG_DRW
178 static void mem_ap_update_tar_cache(struct adiv5_ap *ap)
183 uint32_t inc = mem_ap_get_tar_increment(ap);
184 if (inc >= max_tar_block_size(ap->tar_autoincr_block, ap->tar_value))
185 ap->tar_valid = false;
187 ap->tar_value += inc;
191 * Queue transactions setting up transfer parameters for the
192 * currently selected MEM-AP.
194 * Subsequent transfers using registers like MEM_AP_REG_DRW or MEM_AP_REG_BD2
195 * initiate data reads or writes using memory or peripheral addresses.
196 * If the CSW is configured for it, the TAR may be automatically
197 * incremented after each transfer.
199 * @param ap The MEM-AP.
200 * @param csw MEM-AP Control/Status Word (CSW) register to assign. If this
201 * matches the cached value, the register is not changed.
202 * @param tar MEM-AP Transfer Address Register (TAR) to assign. If this
203 * matches the cached address, the register is not changed.
205 * @return ERROR_OK if the transaction was properly queued, else a fault code.
207 static int mem_ap_setup_transfer(struct adiv5_ap *ap, uint32_t csw, target_addr_t tar)
210 retval = mem_ap_setup_csw(ap, csw);
211 if (retval != ERROR_OK)
213 retval = mem_ap_setup_tar(ap, tar);
214 if (retval != ERROR_OK)
220 * Asynchronous (queued) read of a word from memory or a system register.
222 * @param ap The MEM-AP to access.
223 * @param address Address of the 32-bit word to read; it must be
224 * readable by the currently selected MEM-AP.
225 * @param value points to where the word will be stored when the
226 * transaction queue is flushed (assuming no errors).
228 * @return ERROR_OK for success. Otherwise a fault code.
230 int mem_ap_read_u32(struct adiv5_ap *ap, target_addr_t address,
235 /* Use banked addressing (REG_BDx) to avoid some link traffic
236 * (updating TAR) when reading several consecutive addresses.
238 retval = mem_ap_setup_transfer(ap,
239 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
240 address & 0xFFFFFFFFFFFFFFF0ull);
241 if (retval != ERROR_OK)
244 return dap_queue_ap_read(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC), value);
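/* Worked example (informative): for address 0x40001008 the transfer above
 * programs TAR with 0x40001000 and accesses banked register BD2
 * (MEM_AP_REG_BD0 plus offset 0x8), so further reads anywhere within
 * 0x40001000..0x4000100f reuse the cached TAR value.
 */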
248 * Synchronous read of a word from memory or a system register.
249 * As a side effect, this flushes any queued transactions.
251 * @param ap The MEM-AP to access.
252 * @param address Address of the 32-bit word to read; it must be
253 * readable by the currently selected MEM-AP.
254 * @param value points to where the result will be stored.
256 * @return ERROR_OK for success; *value holds the result.
257 * Otherwise a fault code.
259 int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
264 retval = mem_ap_read_u32(ap, address, value);
265 if (retval != ERROR_OK)
268 return dap_run(ap->dap);
272 * Asynchronous (queued) write of a word to memory or a system register.
274 * @param ap The MEM-AP to access.
275 * @param address Address to be written; it must be writable by
276 * the currently selected MEM-AP.
277 * @param value Word that will be written to the address when transaction
278 * queue is flushed (assuming no errors).
280 * @return ERROR_OK for success. Otherwise a fault code.
282 int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address,
287 /* Use banked addressing (REG_BDx) to avoid some link traffic
288 * (updating TAR) when writing several consecutive addresses.
290 retval = mem_ap_setup_transfer(ap,
291 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
292 address & 0xFFFFFFFFFFFFFFF0ull);
293 if (retval != ERROR_OK)
296 return dap_queue_ap_write(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC),
301 * Synchronous write of a word to memory or a system register.
302 * As a side effect, this flushes any queued transactions.
304 * @param ap The MEM-AP to access.
305 * @param address Address to be written; it must be writable by
306 * the currently selected MEM-AP.
307 * @param value Word that will be written.
309 * @return ERROR_OK for success; the data was written. Otherwise a fault code.
311 int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
314 int retval = mem_ap_write_u32(ap, address, value);
316 if (retval != ERROR_OK)
319 return dap_run(ap->dap);
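/*
 * Illustrative read-modify-write sketch (assuming a valid struct adiv5_ap *ap
 * and a writable 32-bit register at an assumed address 'addr'); each atomic
 * call flushes the transaction queue before returning:
 *
 *	uint32_t val;
 *	int retval = mem_ap_read_atomic_u32(ap, addr, &val);
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_write_atomic_u32(ap, addr, val | 0x1u);
 */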
323 * Synchronous write of a block of memory, using a specific access size.
325 * @param ap The MEM-AP to access.
326 * @param buffer The data buffer to write. No particular alignment is assumed.
327 * @param size Which access size to use, in bytes. 1, 2 or 4.
328 * @param count The number of writes to do (in size units, not bytes).
329 * @param address Address to be written; it must be writable by the currently selected MEM-AP.
330 * @param addrinc Whether the target address should be increased for each write or not. This
331 * should normally be true, except when writing to e.g. a FIFO.
332 * @return ERROR_OK on success, otherwise an error code.
334 static int mem_ap_write(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count,
335 target_addr_t address, bool addrinc)
337 struct adiv5_dap *dap = ap->dap;
338 size_t nbytes = size * count;
339 const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
341 target_addr_t addr_xor;
342 int retval = ERROR_OK;
344 /* TI BE-32 Quirks mode:
345 * Writes on big-endian TMS570 behave very strangely. Observed behavior:
346 * size write address bytes written in order
347 * 4 TAR ^ 0 (val >> 24), (val >> 16), (val >> 8), (val)
348 * 2 TAR ^ 2 (val >> 8), (val)
350 * For example, if you attempt to write a single byte to address 0, the processor
351 * will actually write a byte to address 3.
353 * To make writes of size < 4 work as expected, we xor a value with the address before
354 * setting the TAR, and we set the TAR after every transfer rather than relying on
355 * address increment. */
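/* Worked example (informative): to write a single byte at address 0 on such
 * a part, addr_xor is 3, so TAR is programmed with 0 ^ 3 = 3; the device's
 * quirky byte-lane routing then lands the write at the intended address 0.
 */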
358 csw_size = CSW_32BIT;
360 } else if (size == 2) {
361 csw_size = CSW_16BIT;
362 addr_xor = dap->ti_be_32_quirks ? 2 : 0;
363 } else if (size == 1) {
365 addr_xor = dap->ti_be_32_quirks ? 3 : 0;
367 return ERROR_TARGET_UNALIGNED_ACCESS;
370 if (ap->unaligned_access_bad && (address % size != 0))
371 return ERROR_TARGET_UNALIGNED_ACCESS;
374 uint32_t this_size = size;
376 /* Select packed transfer if possible */
377 if (addrinc && ap->packed_transfers && nbytes >= 4
378 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
380 retval = mem_ap_setup_csw(ap, csw_size | CSW_ADDRINC_PACKED);
382 retval = mem_ap_setup_csw(ap, csw_size | csw_addrincr);
385 if (retval != ERROR_OK)
388 retval = mem_ap_setup_tar(ap, address ^ addr_xor);
389 if (retval != ERROR_OK)
392 /* How many source bytes each transfer will consume, and their location in the DRW,
393 * depends on the type of transfer and alignment. See ARM document IHI0031C. */
394 uint32_t outvalue = 0;
395 uint32_t drw_byte_idx = address;
396 if (dap->ti_be_32_quirks) {
399 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
400 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
401 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
402 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx & 3) ^ addr_xor);
405 outvalue |= (uint32_t)*buffer++ << 8 * (1 ^ (drw_byte_idx++ & 3) ^ addr_xor);
406 outvalue |= (uint32_t)*buffer++ << 8 * (1 ^ (drw_byte_idx & 3) ^ addr_xor);
409 outvalue |= (uint32_t)*buffer++ << 8 * (0 ^ (drw_byte_idx & 3) ^ addr_xor);
412 } else if (dap->nu_npcx_quirks) {
415 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
416 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
417 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
418 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
421 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
422 outvalue |= (uint32_t)*(buffer+1) << 8 * (drw_byte_idx++ & 3);
423 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
424 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
427 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
428 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
429 outvalue |= (uint32_t)*buffer << 8 * (drw_byte_idx++ & 3);
430 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
435 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
436 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
439 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
442 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
448 retval = dap_queue_ap_write(ap, MEM_AP_REG_DRW(dap), outvalue);
449 if (retval != ERROR_OK)
452 mem_ap_update_tar_cache(ap);
454 address += this_size;
457 /* REVISIT: Might want to have a queued version of this function that does not run. */
458 if (retval == ERROR_OK)
459 retval = dap_run(dap);
461 if (retval != ERROR_OK) {
463 if (mem_ap_read_tar(ap, &tar) == ERROR_OK)
464 LOG_ERROR("Failed to write memory at " TARGET_ADDR_FMT, tar);
466 LOG_ERROR("Failed to write memory and, additionally, failed to find out where");
473 * Synchronous read of a block of memory, using a specific access size.
475 * @param ap The MEM-AP to access.
476 * @param buffer The data buffer to receive the data. No particular alignment is assumed.
477 * @param size Which access size to use, in bytes. 1, 2 or 4.
478 * @param count The number of reads to do (in size units, not bytes).
479 * @param adr Address to be read; it must be readable by the currently selected MEM-AP.
480 * @param addrinc Whether the target address should be increased after each read or not. This
481 * should normally be true, except when reading from e.g. a FIFO.
482 * @return ERROR_OK on success, otherwise an error code.
484 static int mem_ap_read(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count,
485 target_addr_t adr, bool addrinc)
487 struct adiv5_dap *dap = ap->dap;
488 size_t nbytes = size * count;
489 const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
491 target_addr_t address = adr;
492 int retval = ERROR_OK;
494 /* TI BE-32 Quirks mode:
495 * Reads on big-endian TMS570 behave strangely, and differently from writes.
496 * They read from the physical address requested, but with DRW byte-reversed.
497 * For example, a byte read from address 0 will place the result in the high bytes of DRW.
498 * Also, packed 8-bit and 16-bit transfers seem to sometimes return garbage in some bytes,
502 csw_size = CSW_32BIT;
504 csw_size = CSW_16BIT;
508 return ERROR_TARGET_UNALIGNED_ACCESS;
510 if (ap->unaligned_access_bad && (adr % size != 0))
511 return ERROR_TARGET_UNALIGNED_ACCESS;
513 /* Allocate buffer to hold the sequence of DRW reads that will be made. This is a significant
514 * over-allocation if packed transfers are going to be used, but determining the real need at
515 * this point would be messy. */
516 uint32_t *read_buf = calloc(count, sizeof(uint32_t));
517 /* Multiplication count * sizeof(uint32_t) may overflow, calloc() is safe */
518 uint32_t *read_ptr = read_buf;
520 LOG_ERROR("Failed to allocate read buffer");
524 /* Queue up all reads. Each read will store the entire DRW word in the read buffer. How many
525 * useful bytes it contains, and their location in the word, depends on the type of transfer
528 uint32_t this_size = size;
530 /* Select packed transfer if possible */
531 if (addrinc && ap->packed_transfers && nbytes >= 4
532 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
534 retval = mem_ap_setup_csw(ap, csw_size | CSW_ADDRINC_PACKED);
536 retval = mem_ap_setup_csw(ap, csw_size | csw_addrincr);
538 if (retval != ERROR_OK)
541 retval = mem_ap_setup_tar(ap, address);
542 if (retval != ERROR_OK)
545 retval = dap_queue_ap_read(ap, MEM_AP_REG_DRW(dap), read_ptr++);
546 if (retval != ERROR_OK)
551 address += this_size;
553 mem_ap_update_tar_cache(ap);
556 if (retval == ERROR_OK)
557 retval = dap_run(dap);
561 nbytes = size * count;
564 /* If something failed, read TAR to find out how much data was successfully read, so we can
565 * at least give the caller what we have. */
566 if (retval != ERROR_OK) {
568 if (mem_ap_read_tar(ap, &tar) == ERROR_OK) {
569 /* TAR is incremented after failed transfer on some devices (eg Cortex-M4) */
570 LOG_ERROR("Failed to read memory at " TARGET_ADDR_FMT, tar);
571 if (nbytes > tar - address)
572 nbytes = tar - address;
574 LOG_ERROR("Failed to read memory and, additionally, failed to find out where");
579 /* Replay loop to populate caller's buffer from the correct word and byte lane */
581 uint32_t this_size = size;
583 if (addrinc && ap->packed_transfers && nbytes >= 4
584 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
588 if (dap->ti_be_32_quirks) {
591 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
592 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
595 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
598 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
603 *buffer++ = *read_ptr >> 8 * (address++ & 3);
604 *buffer++ = *read_ptr >> 8 * (address++ & 3);
607 *buffer++ = *read_ptr >> 8 * (address++ & 3);
610 *buffer++ = *read_ptr >> 8 * (address++ & 3);
622 int mem_ap_read_buf(struct adiv5_ap *ap,
623 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
625 return mem_ap_read(ap, buffer, size, count, address, true);
628 int mem_ap_write_buf(struct adiv5_ap *ap,
629 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
631 return mem_ap_write(ap, buffer, size, count, address, true);
634 int mem_ap_read_buf_noincr(struct adiv5_ap *ap,
635 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
637 return mem_ap_read(ap, buffer, size, count, address, false);
640 int mem_ap_write_buf_noincr(struct adiv5_ap *ap,
641 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
643 return mem_ap_write(ap, buffer, size, count, address, false);
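/*
 * Illustrative sketch (assuming a valid struct adiv5_ap *ap; 0x20000000 here
 * stands for any readable RAM address and 'fifo_addr' is a hypothetical
 * 32-bit FIFO register): read 256 bytes as 64 word-sized accesses, then
 * drain 16 words from the FIFO without address increment.
 *
 *	uint8_t buf[256];
 *	int retval = mem_ap_read_buf(ap, buf, 4, sizeof(buf) / 4, 0x20000000);
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_read_buf_noincr(ap, buf, 4, 16, fifo_addr);
 */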
646 /*--------------------------------------------------------------------------*/
649 #define DAP_POWER_DOMAIN_TIMEOUT (10)
651 /*--------------------------------------------------------------------------*/
654 * Invalidate cached DP select and cached TAR and CSW of all APs
656 void dap_invalidate_cache(struct adiv5_dap *dap)
658 dap->select = DP_SELECT_INVALID;
659 dap->last_read = NULL;
662 for (i = 0; i <= DP_APSEL_MAX; i++) {
663 /* force csw and tar write on the next mem-ap access */
664 dap->ap[i].tar_valid = false;
665 dap->ap[i].csw_value = 0;
670 * Initialize a DAP. This sets up the power domains, prepares the DP
671 * for further use and activates overrun checking.
673 * @param dap The DAP being initialized.
675 int dap_dp_init(struct adiv5_dap *dap)
679 LOG_DEBUG("%s", adiv5_dap_name(dap));
681 dap->do_reconnect = false;
682 dap_invalidate_cache(dap);
685 * Early initialize dap->dp_ctrl_stat.
686 * In jtag mode only, if the following queue run (in dap_dp_poll_register)
687 * fails and sets the sticky error, it will trigger the clearing
688 * of the sticky bit. Without this initialization, system and debug power
689 * would be disabled while clearing the sticky error bit.
691 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
694 * This write operation clears the sticky error bit in jtag mode only and
695 * is ignored in swd mode. It also powers up system and debug domains in
696 * both jtag and swd modes, if not done before.
698 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat | SSTICKYERR);
699 if (retval != ERROR_OK)
702 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
703 if (retval != ERROR_OK)
706 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
707 if (retval != ERROR_OK)
710 /* Check that we have debug power domains activated */
711 LOG_DEBUG("DAP: wait CDBGPWRUPACK");
712 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
713 CDBGPWRUPACK, CDBGPWRUPACK,
714 DAP_POWER_DOMAIN_TIMEOUT);
715 if (retval != ERROR_OK)
718 if (!dap->ignore_syspwrupack) {
719 LOG_DEBUG("DAP: wait CSYSPWRUPACK");
720 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
721 CSYSPWRUPACK, CSYSPWRUPACK,
722 DAP_POWER_DOMAIN_TIMEOUT);
723 if (retval != ERROR_OK)
727 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
728 if (retval != ERROR_OK)
731 /* With debug power on we can activate OVERRUN checking */
732 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ | CORUNDETECT;
733 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
734 if (retval != ERROR_OK)
736 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
737 if (retval != ERROR_OK)
740 retval = dap_run(dap);
741 if (retval != ERROR_OK)
748 * Initialize a DAP or do reconnect if DAP is not accessible.
750 * @param dap The DAP being initialized.
752 int dap_dp_init_or_reconnect(struct adiv5_dap *dap)
754 LOG_DEBUG("%s", adiv5_dap_name(dap));
757 * Early initialize dap->dp_ctrl_stat.
758 * In jtag mode only, if the following atomic reads fail and set the
759 * sticky error, it will trigger the clearing of the sticky bit. Without this
760 * initialization, system and debug power would be disabled while clearing
761 * the sticky error bit.
763 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
765 dap->do_reconnect = false;
767 dap_dp_read_atomic(dap, DP_CTRL_STAT, NULL);
768 if (dap->do_reconnect) {
769 /* dap connect calls dap_dp_init() after transport dependent initialization */
770 return dap->ops->connect(dap);
772 return dap_dp_init(dap);
777 * Initialize a MEM-AP. This reads the MEM-AP CFG register and probes
778 * whether packed transfers are supported, caching the results so that
779 * later transfers through this AP are set up correctly.
781 * @param ap The MEM-AP being initialized.
783 int mem_ap_init(struct adiv5_ap *ap)
785 /* check that we support packed transfers */
788 struct adiv5_dap *dap = ap->dap;
790 /* Set ap->cfg_reg before calling mem_ap_setup_transfer(). */
791 /* mem_ap_setup_transfer() needs to know if the MEM_AP supports LPAE. */
792 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &cfg);
793 if (retval != ERROR_OK)
796 retval = dap_run(dap);
797 if (retval != ERROR_OK)
801 ap->tar_valid = false;
802 ap->csw_value = 0; /* force csw and tar write */
803 retval = mem_ap_setup_transfer(ap, CSW_8BIT | CSW_ADDRINC_PACKED, 0);
804 if (retval != ERROR_OK)
807 retval = dap_queue_ap_read(ap, MEM_AP_REG_CSW(dap), &csw);
808 if (retval != ERROR_OK)
811 retval = dap_run(dap);
812 if (retval != ERROR_OK)
815 if (csw & CSW_ADDRINC_PACKED)
816 ap->packed_transfers = true;
818 ap->packed_transfers = false;
820 /* Packed transfers on TI BE-32 processors do not work correctly in
822 if (dap->ti_be_32_quirks)
823 ap->packed_transfers = false;
825 LOG_DEBUG("MEM_AP Packed Transfers: %s",
826 ap->packed_transfers ? "enabled" : "disabled");
828 /* The ARM ADI spec leaves implementation-defined whether unaligned
829 * memory accesses work, only work partially, or cause a sticky error.
830 * On TI BE-32 processors, reads seem to return garbage in some bytes
831 * and unaligned writes seem to cause a sticky error.
832 * TODO: it would be nice to have a way to detect whether unaligned
833 * operations are supported on other processors. */
834 ap->unaligned_access_bad = dap->ti_be_32_quirks;
836 LOG_DEBUG("MEM_AP CFG: large data %d, long address %d, big-endian %d",
837 !!(cfg & MEM_AP_REG_CFG_LD), !!(cfg & MEM_AP_REG_CFG_LA), !!(cfg & MEM_AP_REG_CFG_BE));
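/* Informative example: with CSW.Size set to 8 bits and AddrInc set to
 * Packed, a single 32-bit DRW access moves four bytes, one per byte lane,
 * so a 4-byte buffer needs one DRW transfer instead of four (see ARM
 * IHI 0031 for the exact byte-lane rules).
 */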
843 * Put the debug link into SWD mode, if the target supports it.
844 * The link's initial mode may be either JTAG (for example,
845 * with SWJ-DP after reset) or SWD.
847 * Note that targets using the JTAG-DP do not support SWD, and that
848 * some targets which could otherwise support it may have been
849 * configured to disable SWD signaling
851 * @param dap The DAP used
852 * @return ERROR_OK or else a fault code.
854 int dap_to_swd(struct adiv5_dap *dap)
856 LOG_DEBUG("Enter SWD mode");
858 return dap_send_sequence(dap, JTAG_TO_SWD);
862 * Put the debug link into JTAG mode, if the target supports it.
863 * The link's initial mode may be either SWD or JTAG.
865 * Note that targets implemented with SW-DP do not support JTAG, and
866 * that some targets which could otherwise support it may have been
867 * configured to disable JTAG signaling
869 * @param dap The DAP used
870 * @return ERROR_OK or else a fault code.
872 int dap_to_jtag(struct adiv5_dap *dap)
874 LOG_DEBUG("Enter JTAG mode");
876 return dap_send_sequence(dap, SWD_TO_JTAG);
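/*
 * Illustrative sketch (the exact re-initialization path is transport
 * dependent): after switching the wire protocol, bring the DP back up
 * before issuing further transactions.
 *
 *	int retval = dap_to_swd(dap);
 *	if (retval == ERROR_OK)
 *		retval = dap_dp_init_or_reconnect(dap);
 */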
879 /* CID interpretation -- see ARM IHI 0029E table B2-7
880 * and ARM IHI 0031E table D1-2.
882 * From 2009/11/25 commit 21378f58b604:
883 * "OptimoDE DESS" is ARM's semicustom DSPish stuff.
884 * Let's keep it as is, for the time being
886 static const char *class_description[16] = {
887 [0x0] = "Generic verification component",
896 [0x9] = "CoreSight component",
898 [0xB] = "Peripheral Test Block",
900 [0xD] = "OptimoDE DESS", /* see above */
901 [0xE] = "Generic IP component",
902 [0xF] = "CoreLink, PrimeCell or System component",
905 #define ARCH_ID(architect, archid) ( \
906 (((architect) << ARM_CS_C9_DEVARCH_ARCHITECT_SHIFT) & ARM_CS_C9_DEVARCH_ARCHITECT_MASK) | \
907 (((archid) << ARM_CS_C9_DEVARCH_ARCHID_SHIFT) & ARM_CS_C9_DEVARCH_ARCHID_MASK) \
910 static const struct {
912 const char *description;
913 } class0x9_devarch[] = {
914 /* keep same unsorted order as in ARM IHI0029E */
915 { ARCH_ID(ARM_ID, 0x0A00), "RAS architecture" },
916 { ARCH_ID(ARM_ID, 0x1A01), "Instrumentation Trace Macrocell (ITM) architecture" },
917 { ARCH_ID(ARM_ID, 0x1A02), "DWT architecture" },
918 { ARCH_ID(ARM_ID, 0x1A03), "Flash Patch and Breakpoint unit (FPB) architecture" },
919 { ARCH_ID(ARM_ID, 0x2A04), "Processor debug architecture (ARMv8-M)" },
920 { ARCH_ID(ARM_ID, 0x6A05), "Processor debug architecture (ARMv8-R)" },
921 { ARCH_ID(ARM_ID, 0x0A10), "PC sample-based profiling" },
922 { ARCH_ID(ARM_ID, 0x4A13), "Embedded Trace Macrocell (ETM) architecture" },
923 { ARCH_ID(ARM_ID, 0x1A14), "Cross Trigger Interface (CTI) architecture" },
924 { ARCH_ID(ARM_ID, 0x6A15), "Processor debug architecture (v8.0-A)" },
925 { ARCH_ID(ARM_ID, 0x7A15), "Processor debug architecture (v8.1-A)" },
926 { ARCH_ID(ARM_ID, 0x8A15), "Processor debug architecture (v8.2-A)" },
927 { ARCH_ID(ARM_ID, 0x2A16), "Processor Performance Monitor (PMU) architecture" },
928 { ARCH_ID(ARM_ID, 0x0A17), "Memory Access Port v2 architecture" },
929 { ARCH_ID(ARM_ID, 0x0A27), "JTAG Access Port v2 architecture" },
930 { ARCH_ID(ARM_ID, 0x0A31), "Basic trace router" },
931 { ARCH_ID(ARM_ID, 0x0A37), "Power requestor" },
932 { ARCH_ID(ARM_ID, 0x0A47), "Unknown Access Port v2 architecture" },
933 { ARCH_ID(ARM_ID, 0x0A50), "HSSTP architecture" },
934 { ARCH_ID(ARM_ID, 0x0A63), "System Trace Macrocell (STM) architecture" },
935 { ARCH_ID(ARM_ID, 0x0A75), "CoreSight ELA architecture" },
936 { ARCH_ID(ARM_ID, 0x0AF7), "CoreSight ROM architecture" },
939 #define DEVARCH_ID_MASK (ARM_CS_C9_DEVARCH_ARCHITECT_MASK | ARM_CS_C9_DEVARCH_ARCHID_MASK)
940 #define DEVARCH_MEM_AP ARCH_ID(ARM_ID, 0x0A17)
941 #define DEVARCH_ROM_C_0X9 ARCH_ID(ARM_ID, 0x0AF7)
942 #define DEVARCH_UNKNOWN_V2 ARCH_ID(ARM_ID, 0x0A47)
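/* Informative example: DEVARCH_MEM_AP is ARCH_ID(ARM_ID, 0x0A17). With ARM's
 * JEP106 code in the ARCHITECT field (bits [31:21]) this evaluates to
 * 0x47600a17 (assuming ARM_ID is ARM's code 0x23b); hardware usually reports
 * 0x47700a17 because the PRESENT bit (bit 20) is also set, and that bit is
 * checked separately via ARM_CS_C9_DEVARCH_PRESENT.
 */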
944 static const char *class0x9_devarch_description(uint32_t devarch)
946 if (!(devarch & ARM_CS_C9_DEVARCH_PRESENT))
947 return "not present";
949 for (unsigned int i = 0; i < ARRAY_SIZE(class0x9_devarch); i++)
950 if ((devarch & DEVARCH_ID_MASK) == class0x9_devarch[i].arch_id)
951 return class0x9_devarch[i].description;
956 static const struct {
958 const char *description;
960 { AP_TYPE_JTAG_AP, "JTAG-AP" },
961 { AP_TYPE_COM_AP, "COM-AP" },
962 { AP_TYPE_AHB3_AP, "MEM-AP AHB3" },
963 { AP_TYPE_APB_AP, "MEM-AP APB2 or APB3" },
964 { AP_TYPE_AXI_AP, "MEM-AP AXI3 or AXI4" },
965 { AP_TYPE_AHB5_AP, "MEM-AP AHB5" },
966 { AP_TYPE_APB4_AP, "MEM-AP APB4" },
967 { AP_TYPE_AXI5_AP, "MEM-AP AXI5" },
968 { AP_TYPE_AHB5H_AP, "MEM-AP AHB5 with enhanced HPROT" },
971 static const char *ap_type_to_description(enum ap_type type)
973 for (unsigned int i = 0; i < ARRAY_SIZE(ap_types); i++)
974 if (type == ap_types[i].type)
975 return ap_types[i].description;
980 bool is_ap_num_valid(struct adiv5_dap *dap, uint64_t ap_num)
985 /* no autodetection for now, so uninitialized is equivalent to ADIv5 for
986 * backward compatibility */
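	/* Informative examples: on ADIv5 any value 0..DP_APSEL_MAX (255) is a
	 * valid AP number; on ADIv6 an AP is addressed by a 4 KiB aligned
	 * offset inside the DP address space, so 0x0 and 0x1000 are valid
	 * while 0x1004 is not.
	 */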
987 if (!is_adiv6(dap)) {
988 if (ap_num > DP_APSEL_MAX)
994 if (ap_num & 0x0fffULL)
997 if (ap_num & ((~0ULL) << dap->asize))
1006 * This function checks the ID for each access port to find the requested Access Port type
1007 * It also calls dap_get_ap() to increment the AP refcount
1009 int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
1011 if (is_adiv6(dap)) {
1012 /* TODO: scan the ROM table and detect the AP available */
1013 LOG_DEBUG("On ADIv6 we cannot scan all the possible APs");
1017 /* Maximum AP number is 255 since the SELECT register is 8 bits */
1018 for (unsigned int ap_num = 0; ap_num <= DP_APSEL_MAX; ap_num++) {
1019 struct adiv5_ap *ap = dap_get_ap(dap, ap_num);
1023 /* read the IDR register of the Access Port */
1024 uint32_t id_val = 0;
1026 int retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &id_val);
1027 if (retval != ERROR_OK) {
1032 retval = dap_run(dap);
1034 /* Reading a register of a non-existent AP should not cause an error,
1035 * but just to be sure, try to continue searching if an error does happen.
1037 if (retval == ERROR_OK && (id_val & AP_TYPE_MASK) == type_to_find) {
1038 LOG_DEBUG("Found %s at AP index: %d (IDR=0x%08" PRIX32 ")",
1039 ap_type_to_description(type_to_find),
1048 LOG_DEBUG("No %s found", ap_type_to_description(type_to_find));
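/*
 * Illustrative sketch (assuming an initialized struct adiv5_dap *dap): look
 * up an APB MEM-AP, use it, then drop the reference taken by the lookup.
 *
 *	struct adiv5_ap *ap;
 *	int retval = dap_find_get_ap(dap, AP_TYPE_APB_AP, &ap);
 *	if (retval == ERROR_OK) {
 *		... use 'ap', e.g. mem_ap_read_atomic_u32(ap, addr, &value) ...
 *		dap_put_ap(ap);
 *	}
 */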
1052 static inline bool is_ap_in_use(struct adiv5_ap *ap)
1054 return ap->refcount > 0 || ap->config_ap_never_release;
1057 static struct adiv5_ap *_dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1059 if (!is_ap_num_valid(dap, ap_num)) {
1060 LOG_ERROR("Invalid AP#0x%" PRIx64, ap_num);
1063 if (is_adiv6(dap)) {
1064 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1065 struct adiv5_ap *ap = &dap->ap[i];
1066 if (is_ap_in_use(ap) && ap->ap_num == ap_num) {
1071 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1072 struct adiv5_ap *ap = &dap->ap[i];
1073 if (!is_ap_in_use(ap)) {
1074 ap->ap_num = ap_num;
1079 LOG_ERROR("No more AP available!");
1084 struct adiv5_ap *ap = &dap->ap[ap_num];
1085 ap->ap_num = ap_num;
1090 /* Return AP with specified ap_num. Increment AP refcount */
1091 struct adiv5_ap *dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1093 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
1095 LOG_DEBUG("refcount AP#0x%" PRIx64 " get %u", ap_num, ap->refcount);
1099 /* Return AP with specified ap_num. Increment AP refcount and keep it non-zero */
1100 struct adiv5_ap *dap_get_config_ap(struct adiv5_dap *dap, uint64_t ap_num)
1102 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
1104 ap->config_ap_never_release = true;
1105 LOG_DEBUG("refcount AP#0x%" PRIx64 " get_config %u", ap_num, ap->refcount);
1110 /* Decrement AP refcount and release the AP when refcount reaches zero */
1111 int dap_put_ap(struct adiv5_ap *ap)
1113 if (ap->refcount == 0) {
1114 LOG_ERROR("BUG: refcount AP#0x%" PRIx64 " put underflow", ap->ap_num);
1120 LOG_DEBUG("refcount AP#0x%" PRIx64 " put %u", ap->ap_num, ap->refcount);
1121 if (!is_ap_in_use(ap)) {
1122 /* defaults from dap_instance_init() */
1123 ap->ap_num = DP_APSEL_INVALID;
1124 ap->memaccess_tck = 255;
1125 ap->tar_autoincr_block = (1 << 10);
1126 ap->csw_default = CSW_AHB_DEFAULT;
1127 ap->cfg_reg = MEM_AP_REG_CFG_INVALID;
1132 static int dap_get_debugbase(struct adiv5_ap *ap,
1133 target_addr_t *dbgbase, uint32_t *apid)
1135 struct adiv5_dap *dap = ap->dap;
1137 uint32_t baseptr_upper, baseptr_lower;
1139 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID) {
1140 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
1141 if (retval != ERROR_OK)
1144 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseptr_lower);
1145 if (retval != ERROR_OK)
1147 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), apid);
1148 if (retval != ERROR_OK)
1150 /* MEM_AP_REG_BASE64 is defined as 'RES0'; it can be read and then ignored on a 32-bit AP */
1151 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap)) {
1152 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseptr_upper);
1153 if (retval != ERROR_OK)
1157 retval = dap_run(dap);
1158 if (retval != ERROR_OK)
1161 if (!is_64bit_ap(ap))
1163 *dbgbase = (((target_addr_t)baseptr_upper) << 32) | baseptr_lower;
1168 int adiv6_dap_read_baseptr(struct command_invocation *cmd, struct adiv5_dap *dap, uint64_t *baseptr)
1170 uint32_t baseptr_lower, baseptr_upper = 0;
1173 if (dap->asize > 32) {
1174 retval = dap_queue_dp_read(dap, DP_BASEPTR1, &baseptr_upper);
1175 if (retval != ERROR_OK)
1179 retval = dap_dp_read_atomic(dap, DP_BASEPTR0, &baseptr_lower);
1180 if (retval != ERROR_OK)
1183 if ((baseptr_lower & DP_BASEPTR0_VALID) != DP_BASEPTR0_VALID) {
1184 command_print(cmd, "System root table not present");
1188 baseptr_lower &= ~0x0fff;
1189 *baseptr = (((uint64_t)baseptr_upper) << 32) | baseptr_lower;
1195 * Method to access the CoreSight component.
1196 * On ADIv5, CoreSight components are on the bus behind a MEM-AP.
1197 * On ADIv6, CoreSight components can either be on the bus behind a MEM-AP
1198 * or directly in the AP.
1200 enum coresight_access_mode {
1205 /** Holds registers and coordinates of a CoreSight component */
1206 struct cs_component_vals {
1207 struct adiv5_ap *ap;
1208 target_addr_t component_base;
1213 uint32_t devtype_memtype;
1214 enum coresight_access_mode mode;
1218 * Helper to read CoreSight component's registers, either on the bus
1219 * behind a MEM-AP or directly in the AP.
1221 * @param mode Method to access the component (AP or MEM-AP).
1222 * @param ap Pointer to AP containing the component.
1223 * @param component_base On MEM-AP access method, base address of the component.
1224 * @param reg Offset of the component's register to read.
1225 * @param value Pointer to where the read value will be stored.
1227 * @return ERROR_OK on success, else a fault code.
1229 static int dap_queue_read_reg(enum coresight_access_mode mode, struct adiv5_ap *ap,
1230 uint64_t component_base, unsigned int reg, uint32_t *value)
1232 if (mode == CS_ACCESS_AP)
1233 return dap_queue_ap_read(ap, reg, value);
1235 /* mode == CS_ACCESS_MEM_AP */
1236 return mem_ap_read_u32(ap, component_base + reg, value);
1240 * Read the CoreSight registers needed during ROM Table Parsing (RTP).
1242 * @param mode Method to access the component (AP or MEM-AP).
1243 * @param ap Pointer to AP containing the component.
1244 * @param component_base On MEM-AP access method, base address of the component.
1245 * @param v Pointer to the struct holding the value of registers.
1247 * @return ERROR_OK on success, else a fault code.
1249 static int rtp_read_cs_regs(enum coresight_access_mode mode, struct adiv5_ap *ap,
1250 target_addr_t component_base, struct cs_component_vals *v)
1252 assert(IS_ALIGNED(component_base, ARM_CS_ALIGN));
1255 uint32_t cid0, cid1, cid2, cid3;
1256 uint32_t pid0, pid1, pid2, pid3, pid4;
1257 int retval = ERROR_OK;
1260 v->component_base = component_base;
1263 /* sort by offset to gain speed */
1266 * Registers DEVARCH, DEVID and DEVTYPE are valid on Class 0x9 devices
1267 * only, but are at offsets above 0xf00, so they can be read on any device
1268 * without triggering an error. Read them for possible later use on Class 0x9.
1270 if (retval == ERROR_OK)
1271 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVARCH, &v->devarch);
1273 if (retval == ERROR_OK)
1274 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVID, &v->devid);
1276 /* Same address as ARM_CS_C1_MEMTYPE */
1277 if (retval == ERROR_OK)
1278 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVTYPE, &v->devtype_memtype);
1280 if (retval == ERROR_OK)
1281 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR4, &pid4);
1283 if (retval == ERROR_OK)
1284 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR0, &pid0);
1285 if (retval == ERROR_OK)
1286 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR1, &pid1);
1287 if (retval == ERROR_OK)
1288 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR2, &pid2);
1289 if (retval == ERROR_OK)
1290 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR3, &pid3);
1292 if (retval == ERROR_OK)
1293 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR0, &cid0);
1294 if (retval == ERROR_OK)
1295 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR1, &cid1);
1296 if (retval == ERROR_OK)
1297 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR2, &cid2);
1298 if (retval == ERROR_OK)
1299 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR3, &cid3);
1301 if (retval == ERROR_OK)
1302 retval = dap_run(ap->dap);
1303 if (retval != ERROR_OK) {
1304 LOG_DEBUG("Failed to read CoreSight registers");
1308 v->cid = (cid3 & 0xff) << 24
1309 | (cid2 & 0xff) << 16
1310 | (cid1 & 0xff) << 8
1312 v->pid = (uint64_t)(pid4 & 0xff) << 32
1313 | (pid3 & 0xff) << 24
1314 | (pid2 & 0xff) << 16
1315 | (pid1 & 0xff) << 8
1321 /* Part number interpretations are from Cortex
1322 * core specs, the CoreSight components TRM
1323 * (ARM DDI 0314H), CoreSight System Design
1324 * Guide (ARM DGI 0012D) and ETM specs; also
1325 * from chip observation (e.g. TI SDTI).
1328 static const struct dap_part_nums {
1329 uint16_t designer_id;
1333 } dap_part_nums[] = {
1334 { ARM_ID, 0x000, "Cortex-M3 SCS", "(System Control Space)", },
1335 { ARM_ID, 0x001, "Cortex-M3 ITM", "(Instrumentation Trace Module)", },
1336 { ARM_ID, 0x002, "Cortex-M3 DWT", "(Data Watchpoint and Trace)", },
1337 { ARM_ID, 0x003, "Cortex-M3 FPB", "(Flash Patch and Breakpoint)", },
1338 { ARM_ID, 0x008, "Cortex-M0 SCS", "(System Control Space)", },
1339 { ARM_ID, 0x00a, "Cortex-M0 DWT", "(Data Watchpoint and Trace)", },
1340 { ARM_ID, 0x00b, "Cortex-M0 BPU", "(Breakpoint Unit)", },
1341 { ARM_ID, 0x00c, "Cortex-M4 SCS", "(System Control Space)", },
1342 { ARM_ID, 0x00d, "CoreSight ETM11", "(Embedded Trace)", },
1343 { ARM_ID, 0x00e, "Cortex-M7 FPB", "(Flash Patch and Breakpoint)", },
1344 { ARM_ID, 0x193, "SoC-600 TSGEN", "(Timestamp Generator)", },
1345 { ARM_ID, 0x470, "Cortex-M1 ROM", "(ROM Table)", },
1346 { ARM_ID, 0x471, "Cortex-M0 ROM", "(ROM Table)", },
1347 { ARM_ID, 0x490, "Cortex-A15 GIC", "(Generic Interrupt Controller)", },
1348 { ARM_ID, 0x492, "Cortex-R52 GICD", "(Distributor)", },
1349 { ARM_ID, 0x493, "Cortex-R52 GICR", "(Redistributor)", },
1350 { ARM_ID, 0x4a1, "Cortex-A53 ROM", "(v8 Memory Map ROM Table)", },
1351 { ARM_ID, 0x4a2, "Cortex-A57 ROM", "(ROM Table)", },
1352 { ARM_ID, 0x4a3, "Cortex-A53 ROM", "(v7 Memory Map ROM Table)", },
1353 { ARM_ID, 0x4a4, "Cortex-A72 ROM", "(ROM Table)", },
1354 { ARM_ID, 0x4a9, "Cortex-A9 ROM", "(ROM Table)", },
1355 { ARM_ID, 0x4aa, "Cortex-A35 ROM", "(v8 Memory Map ROM Table)", },
1356 { ARM_ID, 0x4af, "Cortex-A15 ROM", "(ROM Table)", },
1357 { ARM_ID, 0x4b5, "Cortex-R5 ROM", "(ROM Table)", },
1358 { ARM_ID, 0x4b8, "Cortex-R52 ROM", "(ROM Table)", },
1359 { ARM_ID, 0x4c0, "Cortex-M0+ ROM", "(ROM Table)", },
1360 { ARM_ID, 0x4c3, "Cortex-M3 ROM", "(ROM Table)", },
1361 { ARM_ID, 0x4c4, "Cortex-M4 ROM", "(ROM Table)", },
1362 { ARM_ID, 0x4c7, "Cortex-M7 PPB ROM", "(Private Peripheral Bus ROM Table)", },
1363 { ARM_ID, 0x4c8, "Cortex-M7 ROM", "(ROM Table)", },
1364 { ARM_ID, 0x4e0, "Cortex-A35 ROM", "(v7 Memory Map ROM Table)", },
1365 { ARM_ID, 0x4e4, "Cortex-A76 ROM", "(ROM Table)", },
1366 { ARM_ID, 0x906, "CoreSight CTI", "(Cross Trigger)", },
1367 { ARM_ID, 0x907, "CoreSight ETB", "(Trace Buffer)", },
1368 { ARM_ID, 0x908, "CoreSight CSTF", "(Trace Funnel)", },
1369 { ARM_ID, 0x909, "CoreSight ATBR", "(Advanced Trace Bus Replicator)", },
1370 { ARM_ID, 0x910, "CoreSight ETM9", "(Embedded Trace)", },
1371 { ARM_ID, 0x912, "CoreSight TPIU", "(Trace Port Interface Unit)", },
1372 { ARM_ID, 0x913, "CoreSight ITM", "(Instrumentation Trace Macrocell)", },
1373 { ARM_ID, 0x914, "CoreSight SWO", "(Single Wire Output)", },
1374 { ARM_ID, 0x917, "CoreSight HTM", "(AHB Trace Macrocell)", },
1375 { ARM_ID, 0x920, "CoreSight ETM11", "(Embedded Trace)", },
1376 { ARM_ID, 0x921, "Cortex-A8 ETM", "(Embedded Trace)", },
1377 { ARM_ID, 0x922, "Cortex-A8 CTI", "(Cross Trigger)", },
1378 { ARM_ID, 0x923, "Cortex-M3 TPIU", "(Trace Port Interface Unit)", },
1379 { ARM_ID, 0x924, "Cortex-M3 ETM", "(Embedded Trace)", },
1380 { ARM_ID, 0x925, "Cortex-M4 ETM", "(Embedded Trace)", },
1381 { ARM_ID, 0x930, "Cortex-R4 ETM", "(Embedded Trace)", },
1382 { ARM_ID, 0x931, "Cortex-R5 ETM", "(Embedded Trace)", },
1383 { ARM_ID, 0x932, "CoreSight MTB-M0+", "(Micro Trace Buffer)", },
1384 { ARM_ID, 0x941, "CoreSight TPIU-Lite", "(Trace Port Interface Unit)", },
1385 { ARM_ID, 0x950, "Cortex-A9 PTM", "(Program Trace Macrocell)", },
1386 { ARM_ID, 0x955, "Cortex-A5 ETM", "(Embedded Trace)", },
1387 { ARM_ID, 0x95a, "Cortex-A72 ETM", "(Embedded Trace)", },
1388 { ARM_ID, 0x95b, "Cortex-A17 PTM", "(Program Trace Macrocell)", },
1389 { ARM_ID, 0x95d, "Cortex-A53 ETM", "(Embedded Trace)", },
1390 { ARM_ID, 0x95e, "Cortex-A57 ETM", "(Embedded Trace)", },
1391 { ARM_ID, 0x95f, "Cortex-A15 PTM", "(Program Trace Macrocell)", },
1392 { ARM_ID, 0x961, "CoreSight TMC", "(Trace Memory Controller)", },
1393 { ARM_ID, 0x962, "CoreSight STM", "(System Trace Macrocell)", },
1394 { ARM_ID, 0x975, "Cortex-M7 ETM", "(Embedded Trace)", },
1395 { ARM_ID, 0x9a0, "CoreSight PMU", "(Performance Monitoring Unit)", },
1396 { ARM_ID, 0x9a1, "Cortex-M4 TPIU", "(Trace Port Interface Unit)", },
1397 { ARM_ID, 0x9a4, "CoreSight GPR", "(Granular Power Requester)", },
1398 { ARM_ID, 0x9a5, "Cortex-A5 PMU", "(Performance Monitor Unit)", },
1399 { ARM_ID, 0x9a7, "Cortex-A7 PMU", "(Performance Monitor Unit)", },
1400 { ARM_ID, 0x9a8, "Cortex-A53 CTI", "(Cross Trigger)", },
1401 { ARM_ID, 0x9a9, "Cortex-M7 TPIU", "(Trace Port Interface Unit)", },
1402 { ARM_ID, 0x9ae, "Cortex-A17 PMU", "(Performance Monitor Unit)", },
1403 { ARM_ID, 0x9af, "Cortex-A15 PMU", "(Performance Monitor Unit)", },
1404 { ARM_ID, 0x9b6, "Cortex-R52 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1405 { ARM_ID, 0x9b7, "Cortex-R7 PMU", "(Performance Monitor Unit)", },
1406 { ARM_ID, 0x9d3, "Cortex-A53 PMU", "(Performance Monitor Unit)", },
1407 { ARM_ID, 0x9d7, "Cortex-A57 PMU", "(Performance Monitor Unit)", },
1408 { ARM_ID, 0x9d8, "Cortex-A72 PMU", "(Performance Monitor Unit)", },
1409 { ARM_ID, 0x9da, "Cortex-A35 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1410 { ARM_ID, 0x9e2, "SoC-600 APB-AP", "(APB4 Memory Access Port)", },
1411 { ARM_ID, 0x9e3, "SoC-600 AHB-AP", "(AHB5 Memory Access Port)", },
1412 { ARM_ID, 0x9e4, "SoC-600 AXI-AP", "(AXI Memory Access Port)", },
1413 { ARM_ID, 0x9e5, "SoC-600 APv1 Adapter", "(Access Port v1 Adapter)", },
1414 { ARM_ID, 0x9e6, "SoC-600 JTAG-AP", "(JTAG Access Port)", },
1415 { ARM_ID, 0x9e7, "SoC-600 TPIU", "(Trace Port Interface Unit)", },
1416 { ARM_ID, 0x9e8, "SoC-600 TMC ETR/ETS", "(Embedded Trace Router/Streamer)", },
1417 { ARM_ID, 0x9e9, "SoC-600 TMC ETB", "(Embedded Trace Buffer)", },
1418 { ARM_ID, 0x9ea, "SoC-600 TMC ETF", "(Embedded Trace FIFO)", },
1419 { ARM_ID, 0x9eb, "SoC-600 ATB Funnel", "(Trace Funnel)", },
1420 { ARM_ID, 0x9ec, "SoC-600 ATB Replicator", "(Trace Replicator)", },
1421 { ARM_ID, 0x9ed, "SoC-600 CTI", "(Cross Trigger)", },
1422 { ARM_ID, 0x9ee, "SoC-600 CATU", "(Address Translation Unit)", },
1423 { ARM_ID, 0xc05, "Cortex-A5 Debug", "(Debug Unit)", },
1424 { ARM_ID, 0xc07, "Cortex-A7 Debug", "(Debug Unit)", },
1425 { ARM_ID, 0xc08, "Cortex-A8 Debug", "(Debug Unit)", },
1426 { ARM_ID, 0xc09, "Cortex-A9 Debug", "(Debug Unit)", },
1427 { ARM_ID, 0xc0e, "Cortex-A17 Debug", "(Debug Unit)", },
1428 { ARM_ID, 0xc0f, "Cortex-A15 Debug", "(Debug Unit)", },
1429 { ARM_ID, 0xc14, "Cortex-R4 Debug", "(Debug Unit)", },
1430 { ARM_ID, 0xc15, "Cortex-R5 Debug", "(Debug Unit)", },
1431 { ARM_ID, 0xc17, "Cortex-R7 Debug", "(Debug Unit)", },
1432 { ARM_ID, 0xd03, "Cortex-A53 Debug", "(Debug Unit)", },
1433 { ARM_ID, 0xd04, "Cortex-A35 Debug", "(Debug Unit)", },
1434 { ARM_ID, 0xd07, "Cortex-A57 Debug", "(Debug Unit)", },
1435 { ARM_ID, 0xd08, "Cortex-A72 Debug", "(Debug Unit)", },
1436 { ARM_ID, 0xd0b, "Cortex-A76 Debug", "(Debug Unit)", },
1437 { ARM_ID, 0xd0c, "Neoverse N1", "(Debug Unit)", },
1438 { ARM_ID, 0xd13, "Cortex-R52 Debug", "(Debug Unit)", },
1439 { ARM_ID, 0xd49, "Neoverse N2", "(Debug Unit)", },
1440 { 0x017, 0x120, "TI SDTI", "(System Debug Trace Interface)", }, /* from OMAP3 memmap */
1441 { 0x017, 0x343, "TI DAPCTL", "", }, /* from OMAP3 memmap */
1442 { 0x017, 0x9af, "MSP432 ROM", "(ROM Table)" },
1443 { 0x01f, 0xcd0, "Atmel CPU with DSU", "(CPU)" },
1444 { 0x041, 0x1db, "XMC4500 ROM", "(ROM Table)" },
1445 { 0x041, 0x1df, "XMC4700/4800 ROM", "(ROM Table)" },
1446 { 0x041, 0x1ed, "XMC1000 ROM", "(ROM Table)" },
1447 { 0x065, 0x000, "SHARC+/Blackfin+", "", },
1448 { 0x070, 0x440, "Qualcomm QDSS Component v1", "(Qualcomm Designed CoreSight Component v1)", },
1449 { 0x0bf, 0x100, "Brahma-B53 Debug", "(Debug Unit)", },
1450 { 0x0bf, 0x9d3, "Brahma-B53 PMU", "(Performance Monitor Unit)", },
1451 { 0x0bf, 0x4a1, "Brahma-B53 ROM", "(ROM Table)", },
1452 { 0x0bf, 0x721, "Brahma-B53 ROM", "(ROM Table)", },
1453 { 0x1eb, 0x181, "Tegra 186 ROM", "(ROM Table)", },
1454 { 0x1eb, 0x202, "Denver ETM", "(Denver Embedded Trace)", },
1455 { 0x1eb, 0x211, "Tegra 210 ROM", "(ROM Table)", },
1456 { 0x1eb, 0x302, "Denver Debug", "(Debug Unit)", },
1457 { 0x1eb, 0x402, "Denver PMU", "(Performance Monitor Unit)", },
1460 static const struct dap_part_nums *pidr_to_part_num(unsigned int designer_id, unsigned int part_num)
1462 static const struct dap_part_nums unknown = {
1463 .type = "Unrecognized",
1467 for (unsigned int i = 0; i < ARRAY_SIZE(dap_part_nums); i++)
1468 if (dap_part_nums[i].designer_id == designer_id && dap_part_nums[i].part_num == part_num)
1469 return &dap_part_nums[i];
1474 static int dap_devtype_display(struct command_invocation *cmd, uint32_t devtype)
1476 const char *major = "Reserved", *subtype = "Reserved";
1477 const unsigned int minor = (devtype & ARM_CS_C9_DEVTYPE_SUB_MASK) >> ARM_CS_C9_DEVTYPE_SUB_SHIFT;
1478 const unsigned int devtype_major = (devtype & ARM_CS_C9_DEVTYPE_MAJOR_MASK) >> ARM_CS_C9_DEVTYPE_MAJOR_SHIFT;
1479 switch (devtype_major) {
1481 major = "Miscellaneous";
1487 subtype = "Validation component";
1492 major = "Trace Sink";
1509 major = "Trace Link";
1515 subtype = "Funnel, router";
1521 subtype = "FIFO, buffer";
1526 major = "Trace Source";
1532 subtype = "Processor";
1538 subtype = "Engine/Coprocessor";
1544 subtype = "Software";
1549 major = "Debug Control";
1555 subtype = "Trigger Matrix";
1558 subtype = "Debug Auth";
1561 subtype = "Power Requestor";
1566 major = "Debug Logic";
1572 subtype = "Processor";
1578 subtype = "Engine/Coprocessor";
1589 major = "Performance Monitor";
1595 subtype = "Processor";
1601 subtype = "Engine/Coprocessor";
1612 command_print(cmd, "\t\tType is 0x%02x, %s, %s",
1613 devtype & ARM_CS_C9_DEVTYPE_MASK,
1619 * Actions/operations to be executed while parsing ROM tables.
1623 * Executed at the start of a new AP, typically to print the AP header.
1624 * @param ap Pointer to AP.
1625 * @param depth The current depth level of ROM table.
1626 * @param priv Pointer to private data.
1627 * @return ERROR_OK on success, else a fault code.
1629 int (*ap_header)(struct adiv5_ap *ap, int depth, void *priv);
1631 * Executed at the start of a new MEM-AP, typically to print the MEM-AP header.
1632 * @param retval Error encountered while reading AP.
1633 * @param ap Pointer to AP.
1634 * @param dbgbase Value of MEM-AP Debug Base Address register.
1635 * @param apid Value of MEM-AP IDR Identification Register.
1636 * @param depth The current depth level of ROM table.
1637 * @param priv Pointer to private data.
1638 * @return ERROR_OK on success, else a fault code.
1640 int (*mem_ap_header)(int retval, struct adiv5_ap *ap, uint64_t dbgbase,
1641 uint32_t apid, int depth, void *priv);
1643 * Executed when a CoreSight component is parsed, typically to print
1644 * information on the component.
1645 * @param retval Error encountered while reading component's registers.
1646 * @param v Pointer to a container of the component's registers.
1647 * @param depth The current depth level of ROM table.
1648 * @param priv Pointer to private data.
1649 * @return ERROR_OK on success, else a fault code.
1651 int (*cs_component)(int retval, struct cs_component_vals *v, int depth, void *priv);
1653 * Executed for each entry of a ROM table, typically to print the entry
1654 * and information about validity or end-of-table mark.
1655 * @param retval Error encountered while reading the ROM table entry.
1656 * @param depth The current depth level of ROM table.
1657 * @param offset The offset of the entry in the ROM table.
1658 * @param romentry The value of the ROM table entry.
1659 * @param priv Pointer to private data.
1660 * @return ERROR_OK on success, else a fault code.
1662 int (*rom_table_entry)(int retval, int depth, unsigned int offset, uint64_t romentry,
1671 * Wrapper around struct rtp_ops::ap_header.
1673 static int rtp_ops_ap_header(const struct rtp_ops *ops,
1674 struct adiv5_ap *ap, int depth)
1677 return ops->ap_header(ap, depth, ops->priv);
1683 * Wrapper around struct rtp_ops::mem_ap_header.
1684 * Input parameter @a retval is propagated.
1686 static int rtp_ops_mem_ap_header(const struct rtp_ops *ops,
1687 int retval, struct adiv5_ap *ap, uint64_t dbgbase, uint32_t apid, int depth)
1689 if (!ops->mem_ap_header)
1692 int retval1 = ops->mem_ap_header(retval, ap, dbgbase, apid, depth, ops->priv);
1693 if (retval != ERROR_OK)
1699 * Wrapper around struct rtp_ops::cs_component.
1700 * Input parameter @a retval is propagated.
1702 static int rtp_ops_cs_component(const struct rtp_ops *ops,
1703 int retval, struct cs_component_vals *v, int depth)
1705 if (!ops->cs_component)
1708 int retval1 = ops->cs_component(retval, v, depth, ops->priv);
1709 if (retval != ERROR_OK)
1715 * Wrapper around struct rtp_ops::rom_table_entry.
1716 * Input parameter @a retval is propagated.
1718 static int rtp_ops_rom_table_entry(const struct rtp_ops *ops,
1719 int retval, int depth, unsigned int offset, uint64_t romentry)
1721 if (!ops->rom_table_entry)
1724 int retval1 = ops->rom_table_entry(retval, depth, offset, romentry, ops->priv);
1725 if (retval != ERROR_OK)
1730 /* Broken ROM tables can have circular references. Stop after a while */
1731 #define ROM_TABLE_MAX_DEPTH (16)
1734 * Value used only during lookup of a CoreSight component in ROM table.
1735 * Return CORESIGHT_COMPONENT_FOUND when component is found.
1736 * Return ERROR_OK when component is not found yet.
1737 * Return any other ERROR_* in case of error.
1739 #define CORESIGHT_COMPONENT_FOUND (1)
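/*
 * Illustrative sketch of a lookup callback (hypothetical helper, not part of
 * this file): return CORESIGHT_COMPONENT_FOUND to stop the ROM table walk
 * once a matching component is seen, or ERROR_OK to keep scanning.
 *
 *	static int find_a53_etm(int retval, struct cs_component_vals *v,
 *			int depth, void *priv)
 *	{
 *		if (retval == ERROR_OK &&
 *				ARM_CS_PIDR_DESIGNER(v->pid) == ARM_ID &&
 *				ARM_CS_PIDR_PART(v->pid) == 0x95d)
 *			return CORESIGHT_COMPONENT_FOUND;
 *		return ERROR_OK;
 *	}
 */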
1741 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap, int depth);
1742 static int rtp_cs_component(enum coresight_access_mode mode, const struct rtp_ops *ops,
1743 struct adiv5_ap *ap, target_addr_t dbgbase, bool *is_mem_ap, int depth);
1745 static int rtp_rom_loop(enum coresight_access_mode mode, const struct rtp_ops *ops,
1746 struct adiv5_ap *ap, target_addr_t base_address, int depth,
1747 unsigned int width, unsigned int max_entries)
1749 /* An ADIv6 AP ROM table provides offsets relative to the current AP */
1750 if (mode == CS_ACCESS_AP)
1751 base_address = ap->ap_num;
1753 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1755 unsigned int offset = 0;
1756 while (max_entries--) {
1758 uint32_t romentry_low, romentry_high;
1759 target_addr_t component_base;
1760 unsigned int saved_offset = offset;
1762 int retval = dap_queue_read_reg(mode, ap, base_address, offset, &romentry_low);
1764 if (retval == ERROR_OK && width == 64) {
1765 retval = dap_queue_read_reg(mode, ap, base_address, offset, &romentry_high);
1768 if (retval == ERROR_OK)
1769 retval = dap_run(ap->dap);
1770 if (retval != ERROR_OK) {
1771 LOG_DEBUG("Failed to read ROM table entry");
1776 romentry = (((uint64_t)romentry_high) << 32) | romentry_low;
1777 component_base = base_address +
1778 ((((uint64_t)romentry_high) << 32) | (romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK));
1780 romentry = romentry_low;
1781 /* "romentry" is signed */
1782 component_base = base_address + (int32_t)(romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK);
1783 if (!is_64bit_ap(ap))
1784 component_base = (uint32_t)component_base;
1786 retval = rtp_ops_rom_table_entry(ops, retval, depth, saved_offset, romentry);
1787 if (retval != ERROR_OK)
1790 if (romentry == 0) {
1791 /* End of ROM table */
1795 if (!(romentry & ARM_CS_ROMENTRY_PRESENT))
1799 if (mode == CS_ACCESS_AP) {
1800 struct adiv5_ap *next_ap = dap_get_ap(ap->dap, component_base);
1802 LOG_DEBUG("Wrong AP # 0x%" PRIx64, component_base);
1805 retval = rtp_ap(ops, next_ap, depth + 1);
1806 dap_put_ap(next_ap);
1808 /* mode == CS_ACCESS_MEM_AP */
1809 retval = rtp_cs_component(mode, ops, ap, component_base, NULL, depth + 1);
1811 if (retval == CORESIGHT_COMPONENT_FOUND)
1812 return CORESIGHT_COMPONENT_FOUND;
1813 if (retval != ERROR_OK) {
1814 /* TODO: do we need to send an ABORT before continuing? */
1815 LOG_DEBUG("Ignore error parsing CoreSight component");
1823 static int rtp_cs_component(enum coresight_access_mode mode, const struct rtp_ops *ops,
1824 struct adiv5_ap *ap, target_addr_t base_address, bool *is_mem_ap, int depth)
1826 struct cs_component_vals v;
1829 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1834 if (depth > ROM_TABLE_MAX_DEPTH)
1835 retval = ERROR_FAIL;
1837 retval = rtp_read_cs_regs(mode, ap, base_address, &v);
1839 retval = rtp_ops_cs_component(ops, retval, &v, depth);
1840 if (retval == CORESIGHT_COMPONENT_FOUND)
1841 return CORESIGHT_COMPONENT_FOUND;
1842 if (retval != ERROR_OK)
1843 return ERROR_OK; /* Don't abort recursion */
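	/* Informative example: a valid CIDR reads 0xb105_000d with the component
	 * class in bits [15:12], e.g. 0xb105100d for a Class 0x1 ROM table and
	 * 0xb105900d for a Class 0x9 CoreSight component.
	 */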
1845 if (!is_valid_arm_cs_cidr(v.cid))
1846 return ERROR_OK; /* Don't abort recursion */
1848 const unsigned int class = ARM_CS_CIDR_CLASS(v.cid);
1850 if (class == ARM_CS_CLASS_0X1_ROM_TABLE)
1851 return rtp_rom_loop(mode, ops, ap, base_address, depth, 32, 960);
1853 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
1854 if ((v.devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
1858 if ((v.devarch & DEVARCH_ID_MASK) == DEVARCH_MEM_AP)
1861 /* SoC-600 APv1 Adapter */
1862 if ((v.devarch & DEVARCH_ID_MASK) == DEVARCH_UNKNOWN_V2 &&
1863 ARM_CS_PIDR_DESIGNER(v.pid) == ARM_ID &&
1864 ARM_CS_PIDR_PART(v.pid) == 0x9e5)
1868 /* quit if not ROM table */
1869 if ((v.devarch & DEVARCH_ID_MASK) != DEVARCH_ROM_C_0X9)
1872 if ((v.devid & ARM_CS_C9_DEVID_FORMAT_MASK) == ARM_CS_C9_DEVID_FORMAT_64BIT)
1873 return rtp_rom_loop(mode, ops, ap, base_address, depth, 64, 256);
1875 return rtp_rom_loop(mode, ops, ap, base_address, depth, 32, 512);
1878 /* Class other than 0x1 and 0x9 */
1882 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap, int depth)
1885 target_addr_t dbgbase, invalid_entry;
1887 int retval = rtp_ops_ap_header(ops, ap, depth);
1888 if (retval != ERROR_OK || depth > ROM_TABLE_MAX_DEPTH)
1889 return ERROR_OK; /* Don't abort recursion */
1891 if (is_adiv6(ap->dap)) {
1893 retval = rtp_cs_component(CS_ACCESS_AP, ops, ap, 0, &is_mem_ap, depth);
1894 if (retval == CORESIGHT_COMPONENT_FOUND)
1895 return CORESIGHT_COMPONENT_FOUND;
1896 if (retval != ERROR_OK)
1897 return ERROR_OK; /* Don't abort recursion */
1901 /* Continue for an ADIv6 MEM-AP or SoC-600 APv1 Adapter */
1904 /* Now we read ROM table ID registers, ref. ARM IHI 0029B sec */
1905 retval = dap_get_debugbase(ap, &dbgbase, &apid);
1906 if (retval != ERROR_OK)
1908 retval = rtp_ops_mem_ap_header(ops, retval, ap, dbgbase, apid, depth);
1909 if (retval != ERROR_OK)
1915 /* NOTE: a MEM-AP may have a single CoreSight component that's
1916 * not a ROM table ... or have no such components at all.
1918 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
1920 if (class == AP_REG_IDR_CLASS_MEM_AP) {
1921 if (is_64bit_ap(ap))
1922 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
1924 invalid_entry = 0xFFFFFFFFul;
1926 if (dbgbase != invalid_entry && (dbgbase & 0x3) != 0x2) {
1927 retval = rtp_cs_component(CS_ACCESS_MEM_AP, ops, ap,
1928 dbgbase & 0xFFFFFFFFFFFFF000ull, NULL, depth);
1929 if (retval == CORESIGHT_COMPONENT_FOUND)
1930 return CORESIGHT_COMPONENT_FOUND;
1937 /* Actions for command "dap info" */
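/*
 * The functions below are the rtp_ops callbacks behind "dap info": the
 * generic parser above performs the ROM table walk, and these callbacks
 * only decode and print what was read.  The opaque 'priv' pointer carries
 * the struct command_invocation, so all output reaches the invoking command.
 */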
1939 static int dap_info_ap_header(struct adiv5_ap *ap, int depth, void *priv)
1941 struct command_invocation *cmd = priv;
1943 if (depth > ROM_TABLE_MAX_DEPTH) {
1944 command_print(cmd, "\tTables too deep");
1948 command_print(cmd, "%sAP # 0x%" PRIx64, (depth) ? "\t\t" : "", ap->ap_num);
1952 static int dap_info_mem_ap_header(int retval, struct adiv5_ap *ap,
1953 target_addr_t dbgbase, uint32_t apid, int depth, void *priv)
1955 struct command_invocation *cmd = priv;
1956 target_addr_t invalid_entry;
1959 if (retval != ERROR_OK) {
1960 command_print(cmd, "\t\tCan't read MEM-AP; the corresponding core might be turned off");
1964 if (depth > ROM_TABLE_MAX_DEPTH) {
1965 command_print(cmd, "\tTables too deep");
1970 snprintf(tabs, sizeof(tabs), "\t[L%02d] ", depth);
1972 command_print(cmd, "\t\tAP ID register 0x%8.8" PRIx32, apid);
1974 command_print(cmd, "\t\tNo AP found at this AP#0x%" PRIx64, ap->ap_num);
1978 command_print(cmd, "\t\tType is %s", ap_type_to_description(apid & AP_TYPE_MASK));
1980 /* NOTE: a MEM-AP may have a single CoreSight component that's
1981 * not a ROM table ... or have no such components at all.
1983 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
1985 if (class == AP_REG_IDR_CLASS_MEM_AP) {
1986 if (is_64bit_ap(ap))
1987 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
1989 invalid_entry = 0xFFFFFFFFul;
1991 command_print(cmd, "%sMEM-AP BASE " TARGET_ADDR_FMT, tabs, dbgbase);
1993 if (dbgbase == invalid_entry || (dbgbase & 0x3) == 0x2) {
1994 command_print(cmd, "\t\tNo ROM table present");
1997 command_print(cmd, "\t\tValid ROM table present");
1999 command_print(cmd, "\t\tROM table in legacy format");
2006 static int dap_info_cs_component(int retval, struct cs_component_vals *v, int depth, void *priv)
2008 struct command_invocation *cmd = priv;
2010 if (depth > ROM_TABLE_MAX_DEPTH) {
2011 command_print(cmd, "\tTables too deep");
2015 if (v->mode == CS_ACCESS_MEM_AP)
2016 command_print(cmd, "\t\tComponent base address " TARGET_ADDR_FMT, v->component_base);
2018 if (retval != ERROR_OK) {
2019 command_print(cmd, "\t\tCan't read component; the corresponding core might be turned off");
2023 if (!is_valid_arm_cs_cidr(v->cid)) {
2024 command_print(cmd, "\t\tInvalid CID 0x%08" PRIx32, v->cid);
2025 return ERROR_OK; /* Don't abort recursion */
2028 /* component may take multiple 4K pages */
2029 uint32_t size = ARM_CS_PIDR_SIZE(v->pid);
2031 command_print(cmd, "\t\tStart address " TARGET_ADDR_FMT, v->component_base - 0x1000 * size);
2033 command_print(cmd, "\t\tPeripheral ID 0x%010" PRIx64, v->pid);
2035 const unsigned int part_num = ARM_CS_PIDR_PART(v->pid);
2036 unsigned int designer_id = ARM_CS_PIDR_DESIGNER(v->pid);
2038 if (v->pid & ARM_CS_PIDR_JEDEC) {
2040 command_print(cmd, "\t\tDesigner is 0x%03x, %s",
2041 designer_id, jep106_manufacturer(designer_id));
2043 /* Legacy ASCII ID, clear invalid bits */
2044 designer_id &= 0x7f;
2045 command_print(cmd, "\t\tDesigner ASCII code 0x%02x, %s",
2046 designer_id, designer_id == 0x41 ? "ARM" : "<unknown>");
2049 const struct dap_part_nums *partnum = pidr_to_part_num(designer_id, part_num);
2050 command_print(cmd, "\t\tPart is 0x%03x, %s %s", part_num, partnum->type, partnum->full);
2052 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
2053 command_print(cmd, "\t\tComponent class is 0x%x, %s", class, class_description[class]);
2055 if (class == ARM_CS_CLASS_0X1_ROM_TABLE) {
2056 if (v->devtype_memtype & ARM_CS_C1_MEMTYPE_SYSMEM_MASK)
2057 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
2059 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
2063 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
2064 dap_devtype_display(cmd, v->devtype_memtype);
2066 /* REVISIT also show ARM_CS_C9_DEVID */
2068 if ((v->devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
2071 unsigned int architect_id = ARM_CS_C9_DEVARCH_ARCHITECT(v->devarch);
2072 unsigned int revision = ARM_CS_C9_DEVARCH_REVISION(v->devarch);
2073 command_print(cmd, "\t\tDev Arch is 0x%08" PRIx32 ", %s \"%s\" rev.%u", v->devarch,
2074 jep106_manufacturer(architect_id), class0x9_devarch_description(v->devarch),
2077 if ((v->devarch & DEVARCH_ID_MASK) == DEVARCH_ROM_C_0X9) {
2078 command_print(cmd, "\t\tType is ROM table");
2080 if (v->devid & ARM_CS_C9_DEVID_SYSMEM_MASK)
2081 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
2083 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
2088 /* Class other than 0x1 and 0x9 */
2092 static int dap_info_rom_table_entry(int retval, int depth,
2093 unsigned int offset, uint64_t romentry, void *priv)
2095 struct command_invocation *cmd = priv;
2099 snprintf(tabs, sizeof(tabs), "[L%02d] ", depth);
2101 if (retval != ERROR_OK) {
2102 command_print(cmd, "\t%sROMTABLE[0x%x] Read error", tabs, offset);
2103 command_print(cmd, "\t\tUnable to continue");
2104 command_print(cmd, "\t%s\tStop parsing of ROM table", tabs);
2108 command_print(cmd, "\t%sROMTABLE[0x%x] = 0x%08" PRIx64,
2109 tabs, offset, romentry);
2111 if (romentry == 0) {
2112 command_print(cmd, "\t%s\tEnd of ROM table", tabs);
2116 if (!(romentry & ARM_CS_ROMENTRY_PRESENT)) {
2117 command_print(cmd, "\t\tComponent not present");
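/*
 * Entry point of "dap info": bind the print callbacks above to the ROM
 * table parser and start walking from the given AP.
 */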
2124 int dap_info_command(struct command_invocation *cmd, struct adiv5_ap *ap)
2126 struct rtp_ops dap_info_ops = {
2127 .ap_header = dap_info_ap_header,
2128 .mem_ap_header = dap_info_mem_ap_header,
2129 .cs_component = dap_info_cs_component,
2130 .rom_table_entry = dap_info_rom_table_entry,
2134 return rtp_ap(&dap_info_ops, ap, 0);
2137 /* Actions for dap_lookup_cs_component() */
2139 struct dap_lookup_data {
2144 uint64_t component_base;
2148 static int dap_lookup_cs_component_cs_component(int retval,
2149 struct cs_component_vals *v, int depth, void *priv)
2151 struct dap_lookup_data *lookup = priv;
2153 if (retval != ERROR_OK)
2156 if (!is_valid_arm_cs_cidr(v->cid))
2159 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
2160 if (class != ARM_CS_CLASS_0X9_CS_COMPONENT)
2163 if ((v->devtype_memtype & ARM_CS_C9_DEVTYPE_MASK) != lookup->type)
2167 /* search for next one */
2173 lookup->component_base = v->component_base;
2174 lookup->ap_num = v->ap->ap_num;
2175 return CORESIGHT_COMPONENT_FOUND;
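/*
 * Search the ROM tables below 'ap' for the core_id-th class 0x9 component
 * with the requested DEVTYPE and return its base address in *addr.
 * Matches reached through an AP other than the one passed in are currently
 * ignored (see the TODO below about searching from the root ROM table).
 */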
2178 int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type,
2179 target_addr_t *addr, int32_t core_id)
2181 struct dap_lookup_data lookup = {
2185 struct rtp_ops dap_lookup_cs_component_ops = {
2187 .mem_ap_header = NULL,
2188 .cs_component = dap_lookup_cs_component_cs_component,
2189 .rom_table_entry = NULL,
2193 int retval = rtp_ap(&dap_lookup_cs_component_ops, ap, 0);
2194 if (retval == CORESIGHT_COMPONENT_FOUND) {
2195 if (lookup.ap_num != ap->ap_num) {
2196 /* TODO: handle search from root ROM table */
2197 LOG_DEBUG("CS lookup ended in AP # 0x%" PRIx64 ". Ignore it", lookup.ap_num);
2198 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2200 LOG_DEBUG("CS lookup found at 0x%" PRIx64, lookup.component_base);
2201 *addr = lookup.component_base;
2204 if (retval != ERROR_OK) {
2205 LOG_DEBUG("CS lookup error %d", retval);
2208 LOG_DEBUG("CS lookup not found");
2209 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
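/*
 * Jim/Tcl configuration helpers.  adiv5_jim_spot_configure() handles the
 * configparams shared by every object sitting on a DAP: "-dap", "-ap-num"
 * and, where a base address makes sense, "-baseaddr" plus the deprecated
 * "-ctibase" alias.  The wrappers further below adapt it to the target
 * private config and to struct adiv5_mem_ap_spot.
 */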
2212 enum adiv5_cfg_param {
2216 CFG_CTIBASE, /* DEPRECATED */
2219 static const struct jim_nvp nvp_config_opts[] = {
2220 { .name = "-dap", .value = CFG_DAP },
2221 { .name = "-ap-num", .value = CFG_AP_NUM },
2222 { .name = "-baseaddr", .value = CFG_BASEADDR },
2223 { .name = "-ctibase", .value = CFG_CTIBASE }, /* DEPRECATED */
2224 { .name = NULL, .value = -1 }
2227 static int adiv5_jim_spot_configure(struct jim_getopt_info *goi,
2228 struct adiv5_dap **dap_p, uint64_t *ap_num_p, uint32_t *base_p)
2230 assert(dap_p && ap_num_p);
2235 Jim_SetEmptyResult(goi->interp);
2238 int e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
2241 return JIM_CONTINUE;
2243 /* base_p can be NULL, then '-baseaddr' option is treated as unknown */
2244 if (!base_p && (n->value == CFG_BASEADDR || n->value == CFG_CTIBASE))
2245 return JIM_CONTINUE;
2247 e = jim_getopt_obj(goi, NULL);
2253 if (goi->isconfigure) {
2255 struct adiv5_dap *dap;
2256 e = jim_getopt_obj(goi, &o_t);
2259 dap = dap_instance_by_jim_obj(goi->interp, o_t);
2261 Jim_SetResultString(goi->interp, "DAP name invalid!", -1);
2264 if (*dap_p && *dap_p != dap) {
2265 Jim_SetResultString(goi->interp,
2266 "DAP assignment cannot be changed!", -1);
2274 Jim_SetResultString(goi->interp, "DAP not configured", -1);
2277 Jim_SetResultString(goi->interp, adiv5_dap_name(*dap_p), -1);
2282 if (goi->isconfigure) {
2283 /* jim_wide is a signed 64-bit int; ap_num is unsigned, at most 52 bits wide */
2285 e = jim_getopt_wide(goi, &ap_num);
2288 /* we still don't know dap->adi_version */
2289 if (ap_num < 0 || (ap_num > DP_APSEL_MAX && (ap_num & 0xfff))) {
2290 Jim_SetResultString(goi->interp, "Invalid AP number!", -1);
2297 if (*ap_num_p == DP_APSEL_INVALID) {
2298 Jim_SetResultString(goi->interp, "AP number not configured", -1);
2301 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *ap_num_p));
2306 LOG_WARNING("DEPRECATED! use '-baseaddr' not '-ctibase'");
2309 if (goi->isconfigure) {
2311 e = jim_getopt_wide(goi, &base);
2314 *base_p = (uint32_t)base;
2318 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *base_p));
2326 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "NO PARAMS");
2330 int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
2332 struct adiv5_private_config *pc;
2335 pc = (struct adiv5_private_config *)target->private_config;
2337 pc = calloc(1, sizeof(struct adiv5_private_config));
2339 LOG_ERROR("Out of memory");
2342 pc->ap_num = DP_APSEL_INVALID;
2343 target->private_config = pc;
2346 target->has_dap = true;
2348 e = adiv5_jim_spot_configure(goi, &pc->dap, &pc->ap_num, NULL);
2352 if (pc->dap && !target->dap_configured) {
2353 if (target->tap_configured) {
2355 Jim_SetResultString(goi->interp,
2356 "-chain-position and -dap configparams are mutually exclusive!", -1);
2359 target->tap = pc->dap->tap;
2360 target->dap_configured = true;
2366 int adiv5_verify_config(struct adiv5_private_config *pc)
2377 int adiv5_jim_mem_ap_spot_configure(struct adiv5_mem_ap_spot *cfg,
2378 struct jim_getopt_info *goi)
2380 return adiv5_jim_spot_configure(goi, &cfg->dap, &cfg->ap_num, &cfg->base);
2383 int adiv5_mem_ap_spot_init(struct adiv5_mem_ap_spot *p)
2386 p->ap_num = DP_APSEL_INVALID;
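/*
 * Handlers for the per-instance "dap" subcommands registered in
 * dap_instance_commands[] at the end of this file.  They are invoked on a
 * DAP instance created with "dap create"; typical usage from a config
 * script (names and numbers are illustrative only):
 *
 *   $_CHIPNAME.dap info       (dump the ROM table of the selected AP)
 *   $_CHIPNAME.dap apsel 1    (select AP #1 for subsequent commands)
 *   $_CHIPNAME.dap apid       (print the IDR of the selected AP)
 */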
2391 COMMAND_HANDLER(handle_dap_info_command)
2393 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2401 if (!strcmp(CMD_ARGV[0], "root")) {
2402 if (!is_adiv6(dap)) {
2403 command_print(CMD, "Option \"root\" not allowed with ADIv5 DAP");
2404 return ERROR_COMMAND_ARGUMENT_INVALID;
2406 int retval = adiv6_dap_read_baseptr(CMD, dap, &apsel);
2407 if (retval != ERROR_OK) {
2408 command_print(CMD, "Failed reading DAP baseptr");
2413 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2414 if (!is_ap_num_valid(dap, apsel)) {
2415 command_print(CMD, "Invalid AP number");
2416 return ERROR_COMMAND_ARGUMENT_INVALID;
2420 return ERROR_COMMAND_SYNTAX_ERROR;
2423 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2425 command_print(CMD, "Cannot get AP");
2429 int retval = dap_info_command(CMD, ap);
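/*
 * "baseaddr": read the BASE register (and, on 64-bit capable MEM-APs, also
 * BASE64) of the given AP and print the debug base address it holds.
 */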
2434 COMMAND_HANDLER(dap_baseaddr_command)
2436 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2438 uint32_t baseaddr_lower, baseaddr_upper;
2439 struct adiv5_ap *ap;
2440 target_addr_t baseaddr;
2450 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2451 if (!is_ap_num_valid(dap, apsel)) {
2452 command_print(CMD, "Invalid AP number");
2453 return ERROR_COMMAND_ARGUMENT_INVALID;
2457 return ERROR_COMMAND_SYNTAX_ERROR;
2460 /* NOTE: assumes we're talking to a MEM-AP, which
2461 * has a base address. There are other kinds of AP,
2462 * though they're not common for now. This should
2463 * use the ID register to verify it's a MEM-AP.
2466 ap = dap_get_ap(dap, apsel);
2468 command_print(CMD, "Cannot get AP");
2472 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseaddr_lower);
2474 if (retval == ERROR_OK && ap->cfg_reg == MEM_AP_REG_CFG_INVALID)
2475 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
2477 if (retval == ERROR_OK && (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap))) {
2478 /* MEM_AP_REG_BASE64 is defined as 'RES0'; it can be read and then ignored on 32-bit APs */
2479 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseaddr_upper);
2482 if (retval == ERROR_OK)
2483 retval = dap_run(dap);
2485 if (retval != ERROR_OK)
2488 if (is_64bit_ap(ap)) {
2489 baseaddr = (((target_addr_t)baseaddr_upper) << 32) | baseaddr_lower;
2490 command_print(CMD, "0x%016" PRIx64, baseaddr);
2492 command_print(CMD, "0x%08" PRIx32, baseaddr_lower);
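/*
 * "memaccess": with no argument print, with one argument set, the number of
 * extra tck cycles inserted for each MEM-AP memory bus access on the
 * currently selected AP.
 */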
2497 COMMAND_HANDLER(dap_memaccess_command)
2499 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2500 struct adiv5_ap *ap;
2501 uint32_t memaccess_tck;
2505 ap = dap_get_ap(dap, dap->apsel);
2507 command_print(CMD, "Cannot get AP");
2510 memaccess_tck = ap->memaccess_tck;
2513 ap = dap_get_config_ap(dap, dap->apsel);
2515 command_print(CMD, "Cannot get AP");
2518 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], memaccess_tck);
2519 ap->memaccess_tck = memaccess_tck;
2522 return ERROR_COMMAND_SYNTAX_ERROR;
2527 command_print(CMD, "memory bus access delay set to %" PRIu32 " tck",
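/*
 * "apsel": print or change the AP number used by default by the other
 * "dap" subcommands.
 */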
2533 COMMAND_HANDLER(dap_apsel_command)
2535 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2540 command_print(CMD, "0x%" PRIx64, dap->apsel);
2543 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2544 if (!is_ap_num_valid(dap, apsel)) {
2545 command_print(CMD, "Invalid AP number");
2546 return ERROR_COMMAND_ARGUMENT_INVALID;
2550 return ERROR_COMMAND_SYNTAX_ERROR;
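/*
 * "apcsw": print or adjust the default CSW bits applied to every access
 * through the selected MEM-AP.  "default" restores CSW_AHB_DEFAULT; the
 * two-argument form changes only the bits covered by the mask.  The Size
 * and AddrInc fields are managed internally and are therefore rejected.
 */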
2557 COMMAND_HANDLER(dap_apcsw_command)
2559 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2560 struct adiv5_ap *ap;
2561 uint32_t csw_val, csw_mask;
2565 ap = dap_get_ap(dap, dap->apsel);
2567 command_print(CMD, "Cannot get AP");
2570 command_print(CMD, "AP#0x%" PRIx64 " selected, csw 0x%8.8" PRIx32,
2571 dap->apsel, ap->csw_default);
2574 if (strcmp(CMD_ARGV[0], "default") == 0)
2575 csw_val = CSW_AHB_DEFAULT;
2577 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2579 if (csw_val & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2580 LOG_ERROR("CSW value cannot include 'Size' and 'AddrInc' bit-fields");
2581 return ERROR_COMMAND_ARGUMENT_INVALID;
2583 ap = dap_get_config_ap(dap, dap->apsel);
2585 command_print(CMD, "Cannot get AP");
2588 ap->csw_default = csw_val;
2591 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2592 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], csw_mask);
2593 if (csw_mask & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2594 LOG_ERROR("CSW mask cannot include 'Size' and 'AddrInc' bit-fields");
2595 return ERROR_COMMAND_ARGUMENT_INVALID;
2597 ap = dap_get_config_ap(dap, dap->apsel);
2599 command_print(CMD, "Cannot get AP");
2602 ap->csw_default = (ap->csw_default & ~csw_mask) | (csw_val & csw_mask);
2605 return ERROR_COMMAND_SYNTAX_ERROR;
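/*
 * "apid": read and print the IDR (identification register) of the given AP,
 * or of the currently selected AP when no argument is passed.
 */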
2614 COMMAND_HANDLER(dap_apid_command)
2616 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2626 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2627 if (!is_ap_num_valid(dap, apsel)) {
2628 command_print(CMD, "Invalid AP number");
2629 return ERROR_COMMAND_ARGUMENT_INVALID;
2633 return ERROR_COMMAND_SYNTAX_ERROR;
2636 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2638 command_print(CMD, "Cannot get AP");
2641 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &apid);
2642 if (retval != ERROR_OK) {
2646 retval = dap_run(dap);
2648 if (retval != ERROR_OK)
2651 command_print(CMD, "0x%8.8" PRIx32, apid);
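/*
 * "apreg": raw read or write of a single AP register, addressed by its byte
 * offset.  Writes that hit CSW, TAR or TAR64 also update (or, on a failed
 * write, invalidate) the cached csw_value/tar_value so that later MEM-AP
 * helpers do not reuse stale state.  For example, on an ADIv5 MEM-AP at
 * AP #0, "$_CHIPNAME.dap apreg 0 0x04" reads the TAR (names illustrative).
 */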
2656 COMMAND_HANDLER(dap_apreg_command)
2658 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2660 uint32_t reg, value;
2663 if (CMD_ARGC < 2 || CMD_ARGC > 3)
2664 return ERROR_COMMAND_SYNTAX_ERROR;
2666 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2667 if (!is_ap_num_valid(dap, apsel)) {
2668 command_print(CMD, "Invalid AP number");
2669 return ERROR_COMMAND_ARGUMENT_INVALID;
2672 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], reg);
2673 if (is_adiv6(dap)) {
2674 if (reg >= 4096 || (reg & 3)) {
2675 command_print(CMD, "Invalid reg value (must be less than 4096 and 4-byte aligned)");
2676 return ERROR_COMMAND_ARGUMENT_INVALID;
2678 } else { /* ADI version 5 */
2679 if (reg >= 256 || (reg & 3)) {
2680 command_print(CMD, "Invalid reg value (must be less than 256 and 4-byte aligned)");
2681 return ERROR_COMMAND_ARGUMENT_INVALID;
2685 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2687 command_print(CMD, "Cannot get AP");
2691 if (CMD_ARGC == 3) {
2692 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], value);
2693 /* see if the user-supplied register address matches the CSW or TAR register */
2694 if (reg == MEM_AP_REG_CSW(dap)) {
2695 ap->csw_value = 0; /* invalid, in case write fails */
2696 retval = dap_queue_ap_write(ap, reg, value);
2697 if (retval == ERROR_OK)
2698 ap->csw_value = value;
2699 } else if (reg == MEM_AP_REG_TAR(dap)) {
2700 retval = dap_queue_ap_write(ap, reg, value);
2701 if (retval == ERROR_OK)
2702 ap->tar_value = (ap->tar_value & ~0xFFFFFFFFull) | value;
2704 /* To track independent writes to TAR and TAR64, two tar_valid flags */
2705 /* would be needed. To keep it simple, tar_valid is only invalidated on a */
2706 /* failed write. That makes a later access re-write both TAR and TAR64 */
2707 /* whenever tar_valid is false. */
2708 ap->tar_valid = false;
2710 } else if (reg == MEM_AP_REG_TAR64(dap)) {
2711 retval = dap_queue_ap_write(ap, reg, value);
2712 if (retval == ERROR_OK)
2713 ap->tar_value = (ap->tar_value & 0xFFFFFFFFull) | (((target_addr_t)value) << 32);
2715 /* See above comment for the MEM_AP_REG_TAR failed write case */
2716 ap->tar_valid = false;
2719 retval = dap_queue_ap_write(ap, reg, value);
2722 retval = dap_queue_ap_read(ap, reg, &value);
2724 if (retval == ERROR_OK)
2725 retval = dap_run(dap);
2729 if (retval != ERROR_OK)
2733 command_print(CMD, "0x%08" PRIx32, value);
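/*
 * "dpreg": raw read or write of a DP register.  As described in the help
 * text below, 'reg' encodes both the bank and the register address as
 * (bank << 4) | addr.
 */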
2738 COMMAND_HANDLER(dap_dpreg_command)
2740 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2741 uint32_t reg, value;
2744 if (CMD_ARGC < 1 || CMD_ARGC > 2)
2745 return ERROR_COMMAND_SYNTAX_ERROR;
2747 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg);
2748 if (reg >= 256 || (reg & 3)) {
2749 command_print(CMD, "Invalid reg value (must be less than 256 and 4-byte aligned)");
2750 return ERROR_COMMAND_ARGUMENT_INVALID;
2753 if (CMD_ARGC == 2) {
2754 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2755 retval = dap_queue_dp_write(dap, reg, value);
2757 retval = dap_queue_dp_read(dap, reg, &value);
2759 if (retval == ERROR_OK)
2760 retval = dap_run(dap);
2762 if (retval != ERROR_OK)
2766 command_print(CMD, "0x%08" PRIx32, value);
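/*
 * Boolean quirk switches, parsed with handle_command_parse_bool and
 * consumed by the MEM-AP read/write helpers earlier in this file.
 */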
2771 COMMAND_HANDLER(dap_ti_be_32_quirks_command)
2773 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2774 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->ti_be_32_quirks,
2775 "TI BE-32 quirks mode");
2778 COMMAND_HANDLER(dap_nu_npcx_quirks_command)
2780 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2781 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->nu_npcx_quirks,
2782 "Nuvoton NPCX quirks mode");
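/* Sub-commands available on every DAP instance created with "dap create". */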
2785 const struct command_registration dap_instance_commands[] = {
2788 .handler = handle_dap_info_command,
2789 .mode = COMMAND_EXEC,
2790 .help = "display ROM table for specified MEM-AP (default currently selected AP) "
2791 "or the ADIv6 root ROM table",
2792 .usage = "[ap_num | 'root']",
2796 .handler = dap_apsel_command,
2797 .mode = COMMAND_ANY,
2798 .help = "Set the currently selected AP (default 0) "
2799 "and display the result",
2800 .usage = "[ap_num]",
2804 .handler = dap_apcsw_command,
2805 .mode = COMMAND_ANY,
2806 .help = "Set CSW default bits",
2807 .usage = "[value [mask]]",
2812 .handler = dap_apid_command,
2813 .mode = COMMAND_EXEC,
2814 .help = "return ID register from AP "
2815 "(default currently selected AP)",
2816 .usage = "[ap_num]",
2820 .handler = dap_apreg_command,
2821 .mode = COMMAND_EXEC,
2822 .help = "read/write a register from AP "
2823 "(reg is byte address of a word register, like 0 4 8...)",
2824 .usage = "ap_num reg [value]",
2828 .handler = dap_dpreg_command,
2829 .mode = COMMAND_EXEC,
2830 .help = "read/write a register from DP "
2831 "(reg is byte address (bank << 4 | reg) of a word register, like 0 4 8...)",
2832 .usage = "reg [value]",
2836 .handler = dap_baseaddr_command,
2837 .mode = COMMAND_EXEC,
2838 .help = "return debug base address from MEM-AP "
2839 "(default currently selected AP)",
2840 .usage = "[ap_num]",
2843 .name = "memaccess",
2844 .handler = dap_memaccess_command,
2845 .mode = COMMAND_EXEC,
2846 .help = "set/get number of extra tck for MEM-AP memory "
2847 "bus access [0-255]",
2848 .usage = "[cycles]",
2851 .name = "ti_be_32_quirks",
2852 .handler = dap_ti_be_32_quirks_command,
2853 .mode = COMMAND_CONFIG,
2854 .help = "set/get quirks mode for TI TMS450/TMS570 processors",
2855 .usage = "[enable]",
2858 .name = "nu_npcx_quirks",
2859 .handler = dap_nu_npcx_quirks_command,
2860 .mode = COMMAND_CONFIG,
2861 .help = "set/get quirks mode for Nuvoton NPCX controllers",
2862 .usage = "[enable]",
2864 COMMAND_REGISTRATION_DONE