1 /***************************************************************************
2 * Copyright (C) 2006 by Magnus Lundin *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
8 * Copyright (C) 2009-2010 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
11 * Copyright (C) 2009-2010 by David Brownell *
13 * Copyright (C) 2013 by Andreas Fritiofson *
14 * andreas.fritiofson@gmail.com *
16 * Copyright (C) 2019-2021, Ampere Computing LLC *
18 * This program is free software; you can redistribute it and/or modify *
19 * it under the terms of the GNU General Public License as published by *
20 * the Free Software Foundation; either version 2 of the License, or *
21 * (at your option) any later version. *
23 * This program is distributed in the hope that it will be useful, *
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
26 * GNU General Public License for more details. *
28 * You should have received a copy of the GNU General Public License *
29 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
30 ***************************************************************************/
34 * This file implements support for the ARM Debug Interface version 5 (ADIv5)
35 * debugging architecture. Compared with previous versions, this includes
36 * a low pin-count Serial Wire Debug (SWD) alternative to JTAG for message
37 * transport, and focuses on memory mapped resources as defined by the
38 * CoreSight architecture.
40 * A key concept in ADIv5 is the Debug Access Port, or DAP. A DAP has two
41 * basic components: a Debug Port (DP) transporting messages to and from a
42 * debugger, and an Access Port (AP) accessing resources. Three types of DP
43 * are defined. One uses only JTAG for communication, and is called JTAG-DP.
44 * One uses only SWD for communication, and is called SW-DP. The third can
45 * use either SWD or JTAG, and is called SWJ-DP. The most common type of AP
46 * is used to access memory mapped resources and is called a MEM-AP. Also a
47 * JTAG-AP is defined, bridging to JTAG resources; those are uncommon.
49 * This programming interface allows DAP pipelined operations through a
50 * transaction queue. This primarily affects AP operations (such as using
51 * a MEM-AP to access memory or registers). If the current transaction has
52 * not finished by the time the next one must begin, and the ORUNDETECT bit
53 * is set in the DP_CTRL_STAT register, the SSTICKYORUN status is set and
54 * further AP operations will fail. There are two basic methods to avoid
55 * such overrun errors. One involves polling for status instead of using
56 * transaction pipelining. The other involves adding delays to ensure the
57 * AP has enough time to complete one operation before starting the next
58 * one. (For JTAG these delays are controlled by memaccess_tck.)
62 * Relevant specifications from ARM include:
64 * ARM(tm) Debug Interface v5 Architecture Specification ARM IHI 0031E
65 * CoreSight(tm) v1.0 Architecture Specification ARM IHI 0029B
67 * CoreSight(tm) DAP-Lite TRM, ARM DDI 0316D
68 * Cortex-M3(tm) TRM, ARM DDI 0337G
75 #include "jtag/interface.h"
77 #include "arm_adi_v5.h"
78 #include "arm_coresight.h"
80 #include "transport/transport.h"
81 #include <helper/align.h>
82 #include <helper/jep106.h>
83 #include <helper/time_support.h>
84 #include <helper/list.h>
85 #include <helper/jim-nvp.h>
87 /* ARM ADI Specification requires at least 10 bits used for TAR autoincrement */
90 uint32_t tar_block_size(uint32_t address)
91 Return the largest block starting at address that does not cross a tar block size alignment boundary
93 static uint32_t max_tar_block_size(uint32_t tar_autoincr_block, target_addr_t address)
95 return tar_autoincr_block - ((tar_autoincr_block - 1) & address);
98 /***************************************************************************
100 * DP and MEM-AP register access through APACC and DPACC *
102 ***************************************************************************/
/*
 * Queue a write of the MEM-AP CSW register. The requested value is merged
 * with the AP's default CSW bits, and the write is skipped when it matches
 * the cached ap->csw_value, avoiding redundant link traffic.
 * NOTE(review): the tail of this function (cache update / error path) is
 * not visible in this view.
 */
104 static int mem_ap_setup_csw(struct adiv5_ap *ap, uint32_t csw)
106 csw |= ap->csw_default;
108 if (csw != ap->csw_value) {
109 /* LOG_DEBUG("DAP: Set CSW %x",csw); */
110 int retval = dap_queue_ap_write(ap, MEM_AP_REG_CSW(ap->dap), csw);
111 if (retval != ERROR_OK) {
/*
 * Queue a write of the MEM-AP TAR register, skipped when the cached
 * ap->tar_value already matches. On a 64-bit AP the upper word (TAR64)
 * is only written when bits 63:32 actually changed. On any queueing
 * failure the TAR cache is invalidated so the next access rewrites it.
 */
120 static int mem_ap_setup_tar(struct adiv5_ap *ap, target_addr_t tar)
122 if (!ap->tar_valid || tar != ap->tar_value) {
123 /* LOG_DEBUG("DAP: Set TAR %x",tar); */
124 int retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR(ap->dap), (uint32_t)(tar & 0xffffffffUL));
125 if (retval == ERROR_OK && is_64bit_ap(ap)) {
126 /* See if bits 63:32 of tar is different from last setting */
127 if ((ap->tar_value >> 32) != (tar >> 32))
128 retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR64(ap->dap), (uint32_t)(tar >> 32));
130 if (retval != ERROR_OK) {
131 ap->tar_valid = false;
135 ap->tar_valid = true;
/*
 * Synchronously read back the MEM-AP TAR (and TAR64 on a 64-bit AP),
 * flushing the queue via dap_run(). On success the combined 64-bit value
 * is stored in *tar and cached in ap->tar_value; any failure invalidates
 * the TAR cache.
 */
140 static int mem_ap_read_tar(struct adiv5_ap *ap, target_addr_t *tar)
145 int retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR(ap->dap), &lower);
146 if (retval == ERROR_OK && is_64bit_ap(ap))
147 retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR64(ap->dap), &upper);
149 if (retval != ERROR_OK) {
150 ap->tar_valid = false;
154 retval = dap_run(ap->dap);
155 if (retval != ERROR_OK) {
156 ap->tar_valid = false;
160 *tar = (((target_addr_t)upper) << 32) | (target_addr_t)lower;
162 ap->tar_value = *tar;
163 ap->tar_valid = true;
/*
 * Derive the per-access TAR auto-increment from the cached CSW value:
 * the ADDRINC and SIZE fields select how far the hardware advances TAR
 * after each DRW access. NOTE(review): the per-size case bodies are not
 * visible in this view.
 */
167 static uint32_t mem_ap_get_tar_increment(struct adiv5_ap *ap)
169 switch (ap->csw_value & CSW_ADDRINC_MASK) {
170 case CSW_ADDRINC_SINGLE:
171 switch (ap->csw_value & CSW_SIZE_MASK) {
181 case CSW_ADDRINC_PACKED:
187 /* mem_ap_update_tar_cache is called after an access to MEM_AP_REG_DRW
/*
 * Keep the cached TAR in sync with the hardware auto-increment. If the
 * increment would cross a TAR auto-increment block boundary the hardware
 * behavior is implementation-defined, so the cache is invalidated instead
 * of advanced.
 */
189 static void mem_ap_update_tar_cache(struct adiv5_ap *ap)
194 uint32_t inc = mem_ap_get_tar_increment(ap);
195 if (inc >= max_tar_block_size(ap->tar_autoincr_block, ap->tar_value))
196 ap->tar_valid = false;
198 ap->tar_value += inc;
202 * Queue transactions setting up transfer parameters for the
203 * currently selected MEM-AP.
205 * Subsequent transfers using registers like MEM_AP_REG_DRW or MEM_AP_REG_BD2
206 * initiate data reads or writes using memory or peripheral addresses.
207 * If the CSW is configured for it, the TAR may be automatically
208 * incremented after each transfer.
210 * @param ap The MEM-AP.
211 * @param csw MEM-AP Control/Status Word (CSW) register to assign. If this
212 * matches the cached value, the register is not changed.
213 * @param tar MEM-AP Transfer Address Register (TAR) to assign. If this
214 * matches the cached address, the register is not changed.
216 * @return ERROR_OK if the transaction was properly queued, else a fault code.
/* Queue CSW then TAR setup for the next MEM-AP data transfer; see the
 * doxygen comment above for the full contract. */
218 static int mem_ap_setup_transfer(struct adiv5_ap *ap, uint32_t csw, target_addr_t tar)
221 retval = mem_ap_setup_csw(ap, csw);
222 if (retval != ERROR_OK)
224 retval = mem_ap_setup_tar(ap, tar);
225 if (retval != ERROR_OK)
231 * Asynchronous (queued) read of a word from memory or a system register.
233 * @param ap The MEM-AP to access.
234 * @param address Address of the 32-bit word to read; it must be
235 * readable by the currently selected MEM-AP.
236 * @param value points to where the word will be stored when the
237 * transaction queue is flushed (assuming no errors).
239 * @return ERROR_OK for success. Otherwise a fault code.
/* Queued 32-bit read using the banked data registers: TAR is set to the
 * 16-byte-aligned base and BD0..BD3 select the word within it, so nearby
 * reads reuse the same TAR value. */
241 int mem_ap_read_u32(struct adiv5_ap *ap, target_addr_t address,
246 /* Use banked addressing (REG_BDx) to avoid some link traffic
247 * (updating TAR) when reading several consecutive addresses.
249 retval = mem_ap_setup_transfer(ap,
250 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
251 address & 0xFFFFFFFFFFFFFFF0ull);
252 if (retval != ERROR_OK)
255 return dap_queue_ap_read(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC), value);
259 * Synchronous read of a word from memory or a system register.
260 * As a side effect, this flushes any queued transactions.
262 * @param ap The MEM-AP to access.
263 * @param address Address of the 32-bit word to read; it must be
264 * readable by the currently selected MEM-AP.
265 * @param value points to where the result will be stored.
267 * @return ERROR_OK for success; *value holds the result.
268 * Otherwise a fault code.
/* Queue the read, then flush the whole transaction queue so *value is
 * valid on return. */
270 int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
275 retval = mem_ap_read_u32(ap, address, value);
276 if (retval != ERROR_OK)
279 return dap_run(ap->dap);
283 * Asynchronous (queued) write of a word to memory or a system register.
285 * @param ap The MEM-AP to access.
286 * @param address Address to be written; it must be writable by
287 * the currently selected MEM-AP.
288 * @param value Word that will be written to the address when transaction
289 * queue is flushed (assuming no errors).
291 * @return ERROR_OK for success. Otherwise a fault code.
/* Queued 32-bit write via the banked data registers; mirrors
 * mem_ap_read_u32(): 16-byte-aligned TAR plus BDx word select. */
293 int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address,
298 /* Use banked addressing (REG_BDx) to avoid some link traffic
299 * (updating TAR) when writing several consecutive addresses.
301 retval = mem_ap_setup_transfer(ap,
302 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
303 address & 0xFFFFFFFFFFFFFFF0ull);
304 if (retval != ERROR_OK)
307 return dap_queue_ap_write(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC),
312 * Synchronous write of a word to memory or a system register.
313 * As a side effect, this flushes any queued transactions.
315 * @param ap The MEM-AP to access.
316 * @param address Address to be written; it must be writable by
317 * the currently selected MEM-AP.
318 * @param value Word that will be written.
320 * @return ERROR_OK for success; the data was written. Otherwise a fault code.
/* Queue the write, then flush the transaction queue so the write has
 * completed (or failed) on return. */
322 int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
325 int retval = mem_ap_write_u32(ap, address, value);
327 if (retval != ERROR_OK)
330 return dap_run(ap->dap);
334 * Synchronous write of a block of memory, using a specific access size.
336 * @param ap The MEM-AP to access.
337 * @param buffer The data buffer to write. No particular alignment is assumed.
338 * @param size Which access size to use, in bytes. 1, 2 or 4.
339 * @param count The number of writes to do (in size units, not bytes).
340 * @param address Address to be written; it must be writable by the currently selected MEM-AP.
341 * @param addrinc Whether the target address should be increased for each write or not. This
342 * should normally be true, except when writing to e.g. a FIFO.
343 * @return ERROR_OK on success, otherwise an error code.
/*
 * Core block-write engine: packs source bytes into DRW words according to
 * access size, alignment and the TI BE-32 quirk, selecting packed transfers
 * when the CSW supports them, then flushes the queue. On failure, TAR is
 * read back to report where the write stopped.
 */
345 static int mem_ap_write(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count,
346 target_addr_t address, bool addrinc)
348 struct adiv5_dap *dap = ap->dap;
349 size_t nbytes = size * count;
350 const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
352 target_addr_t addr_xor;
353 int retval = ERROR_OK;
355 /* TI BE-32 Quirks mode:
356 * Writes on big-endian TMS570 behave very strangely. Observed behavior:
357 * size write address bytes written in order
358 * 4 TAR ^ 0 (val >> 24), (val >> 16), (val >> 8), (val)
359 * 2 TAR ^ 2 (val >> 8), (val)
361 * For example, if you attempt to write a single byte to address 0, the processor
362 * will actually write a byte to address 3.
364 * To make writes of size < 4 work as expected, we xor a value with the address before
365 * setting the TAR, and we set the TAR after every transfer rather than relying on
366 * address increment. */
369 csw_size = CSW_32BIT;
371 } else if (size == 2) {
372 csw_size = CSW_16BIT;
373 addr_xor = dap->ti_be_32_quirks ? 2 : 0;
374 } else if (size == 1) {
376 addr_xor = dap->ti_be_32_quirks ? 3 : 0;
378 return ERROR_TARGET_UNALIGNED_ACCESS;
381 if (ap->unaligned_access_bad && (address % size != 0))
382 return ERROR_TARGET_UNALIGNED_ACCESS;
385 uint32_t this_size = size;
387 /* Select packed transfer if possible */
388 if (addrinc && ap->packed_transfers && nbytes >= 4
389 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
391 retval = mem_ap_setup_csw(ap, csw_size | CSW_ADDRINC_PACKED);
393 retval = mem_ap_setup_csw(ap, csw_size | csw_addrincr);
396 if (retval != ERROR_OK)
399 retval = mem_ap_setup_tar(ap, address ^ addr_xor);
400 if (retval != ERROR_OK)
403 /* How many source bytes each transfer will consume, and their location in the DRW,
404 * depends on the type of transfer and alignment. See ARM document IHI0031C. */
405 uint32_t outvalue = 0;
406 uint32_t drw_byte_idx = address;
407 if (dap->ti_be_32_quirks) {
/* Quirky lanes: big-endian byte placement, xor-corrected per size. */
410 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
411 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
412 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx++ & 3) ^ addr_xor);
413 outvalue |= (uint32_t)*buffer++ << 8 * (3 ^ (drw_byte_idx & 3) ^ addr_xor);
416 outvalue |= (uint32_t)*buffer++ << 8 * (1 ^ (drw_byte_idx++ & 3) ^ addr_xor);
417 outvalue |= (uint32_t)*buffer++ << 8 * (1 ^ (drw_byte_idx & 3) ^ addr_xor);
420 outvalue |= (uint32_t)*buffer++ << 8 * (0 ^ (drw_byte_idx & 3) ^ addr_xor);
/* Normal lanes: each byte goes to its natural DRW byte lane. */
426 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
427 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
430 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx++ & 3);
433 outvalue |= (uint32_t)*buffer++ << 8 * (drw_byte_idx & 3);
439 retval = dap_queue_ap_write(ap, MEM_AP_REG_DRW(dap), outvalue);
440 if (retval != ERROR_OK)
443 mem_ap_update_tar_cache(ap);
445 address += this_size;
448 /* REVISIT: Might want to have a queued version of this function that does not run. */
449 if (retval == ERROR_OK)
450 retval = dap_run(dap);
452 if (retval != ERROR_OK) {
454 if (mem_ap_read_tar(ap, &tar) == ERROR_OK)
455 LOG_ERROR("Failed to write memory at " TARGET_ADDR_FMT, tar);
457 LOG_ERROR("Failed to write memory and, additionally, failed to find out where");
464 * Synchronous read of a block of memory, using a specific access size.
466 * @param ap The MEM-AP to access.
467 * @param buffer The data buffer to receive the data. No particular alignment is assumed.
468 * @param size Which access size to use, in bytes. 1, 2 or 4.
469 * @param count The number of reads to do (in size units, not bytes).
470 * @param adr Address to be read; it must be readable by the currently selected MEM-AP.
471 * @param addrinc Whether the target address should be increased after each read or not. This
472 * should normally be true, except when reading from e.g. a FIFO.
473 * @return ERROR_OK on success, otherwise an error code.
/*
 * Core block-read engine: queues all DRW reads into a temporary word
 * buffer (one word per access, even for packed transfers), flushes the
 * queue, then replays the sequence to extract the useful byte lanes into
 * the caller's buffer. On failure, TAR is read back to salvage the data
 * that was transferred before the fault.
 */
475 static int mem_ap_read(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count,
476 target_addr_t adr, bool addrinc)
478 struct adiv5_dap *dap = ap->dap;
479 size_t nbytes = size * count;
480 const uint32_t csw_addrincr = addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
482 target_addr_t address = adr;
483 int retval = ERROR_OK;
485 /* TI BE-32 Quirks mode:
486 * Reads on big-endian TMS570 behave strangely differently than writes.
487 * They read from the physical address requested, but with DRW byte-reversed.
488 * For example, a byte read from address 0 will place the result in the high bytes of DRW.
489 * Also, packed 8-bit and 16-bit transfers seem to sometimes return garbage in some bytes,
493 csw_size = CSW_32BIT;
495 csw_size = CSW_16BIT;
499 return ERROR_TARGET_UNALIGNED_ACCESS;
501 if (ap->unaligned_access_bad && (adr % size != 0))
502 return ERROR_TARGET_UNALIGNED_ACCESS;
504 /* Allocate buffer to hold the sequence of DRW reads that will be made. This is a significant
505 * over-allocation if packed transfers are going to be used, but determining the real need at
506 * this point would be messy. */
507 uint32_t *read_buf = calloc(count, sizeof(uint32_t));
508 /* Multiplication count * sizeof(uint32_t) may overflow, calloc() is safe */
509 uint32_t *read_ptr = read_buf;
511 LOG_ERROR("Failed to allocate read buffer");
515 /* Queue up all reads. Each read will store the entire DRW word in the read buffer. How many
516 * useful bytes it contains, and their location in the word, depends on the type of transfer
519 uint32_t this_size = size;
521 /* Select packed transfer if possible */
522 if (addrinc && ap->packed_transfers && nbytes >= 4
523 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
525 retval = mem_ap_setup_csw(ap, csw_size | CSW_ADDRINC_PACKED);
527 retval = mem_ap_setup_csw(ap, csw_size | csw_addrincr);
529 if (retval != ERROR_OK)
532 retval = mem_ap_setup_tar(ap, address);
533 if (retval != ERROR_OK)
536 retval = dap_queue_ap_read(ap, MEM_AP_REG_DRW(dap), read_ptr++);
537 if (retval != ERROR_OK)
542 address += this_size;
544 mem_ap_update_tar_cache(ap);
547 if (retval == ERROR_OK)
548 retval = dap_run(dap);
/* Reset counters for the extraction pass below. */
552 nbytes = size * count;
555 /* If something failed, read TAR to find out how much data was successfully read, so we can
556 * at least give the caller what we have. */
557 if (retval != ERROR_OK) {
559 if (mem_ap_read_tar(ap, &tar) == ERROR_OK) {
560 /* TAR is incremented after failed transfer on some devices (eg Cortex-M4) */
561 LOG_ERROR("Failed to read memory at " TARGET_ADDR_FMT, tar);
562 if (nbytes > tar - address)
563 nbytes = tar - address;
565 LOG_ERROR("Failed to read memory and, additionally, failed to find out where");
570 /* Replay loop to populate caller's buffer from the correct word and byte lane */
572 uint32_t this_size = size;
574 if (addrinc && ap->packed_transfers && nbytes >= 4
575 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
579 if (dap->ti_be_32_quirks) {
/* Quirky lanes: DRW arrives byte-reversed on TI BE-32 parts. */
582 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
583 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
586 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
589 *buffer++ = *read_ptr >> 8 * (3 - (address++ & 3));
/* Normal lanes: natural little-endian lane selection. */
594 *buffer++ = *read_ptr >> 8 * (address++ & 3);
595 *buffer++ = *read_ptr >> 8 * (address++ & 3);
598 *buffer++ = *read_ptr >> 8 * (address++ & 3);
601 *buffer++ = *read_ptr >> 8 * (address++ & 3);
/* Block read with address auto-increment (normal memory). */
613 int mem_ap_read_buf(struct adiv5_ap *ap,
614 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
616 return mem_ap_read(ap, buffer, size, count, address, true);
/* Block write with address auto-increment (normal memory). */
619 int mem_ap_write_buf(struct adiv5_ap *ap,
620 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
622 return mem_ap_write(ap, buffer, size, count, address, true);
/* Block read at a fixed address (e.g. draining a FIFO register). */
625 int mem_ap_read_buf_noincr(struct adiv5_ap *ap,
626 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
628 return mem_ap_read(ap, buffer, size, count, address, false);
/* Block write at a fixed address (e.g. feeding a FIFO register). */
631 int mem_ap_write_buf_noincr(struct adiv5_ap *ap,
632 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
634 return mem_ap_write(ap, buffer, size, count, address, false);
637 /*--------------------------------------------------------------------------*/
640 #define DAP_POWER_DOMAIN_TIMEOUT (10)
642 /*--------------------------------------------------------------------------*/
645 * Invalidate cached DP select and cached TAR and CSW of all APs
/* Drop all cached DP/AP register state so the next accesses rewrite
 * SELECT, CSW and TAR from scratch (used after reset/reconnect). */
647 void dap_invalidate_cache(struct adiv5_dap *dap)
649 dap->select = DP_SELECT_INVALID;
650 dap->last_read = NULL;
653 for (i = 0; i <= DP_APSEL_MAX; i++) {
654 /* force csw and tar write on the next mem-ap access */
655 dap->ap[i].tar_valid = false;
656 dap->ap[i].csw_value = 0;
661 * Initialize a DAP. This sets up the power domains, prepares the DP
662 * for further use and activates overrun checking.
664 * @param dap The DAP being initialized.
/*
 * Bring up the Debug Port: clear any sticky error, request system and
 * debug power, poll for the power-up acknowledges, then enable overrun
 * detection (CORUNDETECT) and flush the queue.
 */
666 int dap_dp_init(struct adiv5_dap *dap)
670 LOG_DEBUG("%s", adiv5_dap_name(dap));
672 dap->do_reconnect = false;
673 dap_invalidate_cache(dap);
676 * Early initialize dap->dp_ctrl_stat.
677 * In jtag mode only, if the following queue run (in dap_dp_poll_register)
678 * fails and sets the sticky error, it will trigger the clearing
679 * of the sticky. Without this initialization system and debug power
680 * would be disabled while clearing the sticky error bit.
682 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
685 * This write operation clears the sticky error bit in jtag mode only and
686 * is ignored in swd mode. It also powers-up system and debug domains in
687 * both jtag and swd modes, if not done before.
689 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat | SSTICKYERR);
690 if (retval != ERROR_OK)
693 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
694 if (retval != ERROR_OK)
697 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
698 if (retval != ERROR_OK)
701 /* Check that we have debug power domains activated */
702 LOG_DEBUG("DAP: wait CDBGPWRUPACK");
703 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
704 CDBGPWRUPACK, CDBGPWRUPACK,
705 DAP_POWER_DOMAIN_TIMEOUT);
706 if (retval != ERROR_OK)
709 if (!dap->ignore_syspwrupack) {
710 LOG_DEBUG("DAP: wait CSYSPWRUPACK");
711 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
712 CSYSPWRUPACK, CSYSPWRUPACK,
713 DAP_POWER_DOMAIN_TIMEOUT);
714 if (retval != ERROR_OK)
718 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
719 if (retval != ERROR_OK)
722 /* With debug power on we can activate OVERRUN checking */
723 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ | CORUNDETECT;
724 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
725 if (retval != ERROR_OK)
727 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
728 if (retval != ERROR_OK)
731 retval = dap_run(dap);
732 if (retval != ERROR_OK)
739 * Initialize a DAP or do reconnect if DAP is not accessible.
741 * @param dap The DAP being initialized.
/*
 * Probe the DP with an atomic CTRL/STAT read; if the transport flagged a
 * lost connection (dap->do_reconnect), redo the full transport-level
 * connect, otherwise fall through to the normal dap_dp_init().
 */
743 int dap_dp_init_or_reconnect(struct adiv5_dap *dap)
745 LOG_DEBUG("%s", adiv5_dap_name(dap));
748 * Early initialize dap->dp_ctrl_stat.
749 * In jtag mode only, if the following atomic reads fail and set the
750 * sticky error, it will trigger the clearing of the sticky. Without this
751 * initialization system and debug power would be disabled while clearing
752 * the sticky error bit.
754 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
756 dap->do_reconnect = false;
758 dap_dp_read_atomic(dap, DP_CTRL_STAT, NULL);
759 if (dap->do_reconnect) {
760 /* dap connect calls dap_dp_init() after transport dependent initialization */
761 return dap->ops->connect(dap);
763 return dap_dp_init(dap);
768 * Initialize a MEM-AP. This reads the MEM-AP CFG register, probes
769 * whether packed transfers are supported, and configures the quirk
770 * flags used by later block transfers.
772 * @param ap The MEM-AP being initialized.
/*
 * Probe MEM-AP capabilities: read CFG (LPAE/large-data/endianness bits),
 * attempt to set packed 8-bit auto-increment in CSW and read it back to
 * detect packed-transfer support, then apply TI BE-32 quirk overrides.
 */
774 int mem_ap_init(struct adiv5_ap *ap)
776 /* check that we support packed transfers */
779 struct adiv5_dap *dap = ap->dap;
781 /* Set ap->cfg_reg before calling mem_ap_setup_transfer(). */
782 /* mem_ap_setup_transfer() needs to know if the MEM_AP supports LPAE. */
783 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &cfg);
784 if (retval != ERROR_OK)
787 retval = dap_run(dap);
788 if (retval != ERROR_OK)
792 ap->tar_valid = false;
793 ap->csw_value = 0; /* force csw and tar write */
794 retval = mem_ap_setup_transfer(ap, CSW_8BIT | CSW_ADDRINC_PACKED, 0);
795 if (retval != ERROR_OK)
798 retval = dap_queue_ap_read(ap, MEM_AP_REG_CSW(dap), &csw);
799 if (retval != ERROR_OK)
802 retval = dap_run(dap);
803 if (retval != ERROR_OK)
/* If the PACKED bit stuck, the implementation supports packed transfers. */
806 if (csw & CSW_ADDRINC_PACKED)
807 ap->packed_transfers = true;
809 ap->packed_transfers = false;
811 /* Packed transfers on TI BE-32 processors do not work correctly in
813 if (dap->ti_be_32_quirks)
814 ap->packed_transfers = false;
816 LOG_DEBUG("MEM_AP Packed Transfers: %s",
817 ap->packed_transfers ? "enabled" : "disabled");
819 /* The ARM ADI spec leaves implementation-defined whether unaligned
820 * memory accesses work, only work partially, or cause a sticky error.
821 * On TI BE-32 processors, reads seem to return garbage in some bytes
822 * and unaligned writes seem to cause a sticky error.
823 * TODO: it would be nice to have a way to detect whether unaligned
824 * operations are supported on other processors. */
825 ap->unaligned_access_bad = dap->ti_be_32_quirks;
827 LOG_DEBUG("MEM_AP CFG: large data %d, long address %d, big-endian %d",
828 !!(cfg & MEM_AP_REG_CFG_LD), !!(cfg & MEM_AP_REG_CFG_LA), !!(cfg & MEM_AP_REG_CFG_BE));
834 * Put the debug link into SWD mode, if the target supports it.
835 * The link's initial mode may be either JTAG (for example,
836 * with SWJ-DP after reset) or SWD.
838 * Note that targets using the JTAG-DP do not support SWD, and that
839 * some targets which could otherwise support it may have been
840 * configured to disable SWD signaling
842 * @param dap The DAP used
843 * @return ERROR_OK or else a fault code.
/* Emit the JTAG-to-SWD switch sequence on the wire. */
845 int dap_to_swd(struct adiv5_dap *dap)
847 LOG_DEBUG("Enter SWD mode");
849 return dap_send_sequence(dap, JTAG_TO_SWD);
853 * Put the debug link into JTAG mode, if the target supports it.
854 * The link's initial mode may be either SWD or JTAG.
856 * Note that targets implemented with SW-DP do not support JTAG, and
857 * that some targets which could otherwise support it may have been
858 * configured to disable JTAG signaling
860 * @param dap The DAP used
861 * @return ERROR_OK or else a fault code.
/* Emit the SWD-to-JTAG switch sequence on the wire. */
863 int dap_to_jtag(struct adiv5_dap *dap)
865 LOG_DEBUG("Enter JTAG mode");
867 return dap_send_sequence(dap, SWD_TO_JTAG);
870 /* CID interpretation -- see ARM IHI 0029E table B2-7
871 * and ARM IHI 0031E table D1-2.
873 * From 2009/11/25 commit 21378f58b604:
874 * "OptimoDE DESS" is ARM's semicustom DSPish stuff.
875 * Let's keep it as is, for the time being
/* Human-readable names for the CIDR component class field (index is the
 * 4-bit class value). */
877 static const char *class_description[16] = {
878 [0x0] = "Generic verification component",
887 [0x9] = "CoreSight component",
889 [0xB] = "Peripheral Test Block",
891 [0xD] = "OptimoDE DESS", /* see above */
892 [0xE] = "Generic IP component",
893 [0xF] = "CoreLink, PrimeCell or System component",
/* Compose a DEVARCH match value from a JEP106 architect code and an
 * architecture ID, using the CoreSight DEVARCH field layout. */
896 #define ARCH_ID(architect, archid) ( \
897 (((architect) << ARM_CS_C9_DEVARCH_ARCHITECT_SHIFT) & ARM_CS_C9_DEVARCH_ARCHITECT_MASK) | \
898 (((archid) << ARM_CS_C9_DEVARCH_ARCHID_SHIFT) & ARM_CS_C9_DEVARCH_ARCHID_MASK) \
/* Lookup table mapping class-0x9 (CoreSight) DEVARCH architect+archid
 * pairs to descriptive names. */
901 static const struct {
903 const char *description;
904 } class0x9_devarch[] = {
905 /* keep same unsorted order as in ARM IHI0029E */
906 { ARCH_ID(ARM_ID, 0x0A00), "RAS architecture" },
907 { ARCH_ID(ARM_ID, 0x1A01), "Instrumentation Trace Macrocell (ITM) architecture" },
908 { ARCH_ID(ARM_ID, 0x1A02), "DWT architecture" },
909 { ARCH_ID(ARM_ID, 0x1A03), "Flash Patch and Breakpoint unit (FPB) architecture" },
910 { ARCH_ID(ARM_ID, 0x2A04), "Processor debug architecture (ARMv8-M)" },
911 { ARCH_ID(ARM_ID, 0x6A05), "Processor debug architecture (ARMv8-R)" },
912 { ARCH_ID(ARM_ID, 0x0A10), "PC sample-based profiling" },
913 { ARCH_ID(ARM_ID, 0x4A13), "Embedded Trace Macrocell (ETM) architecture" },
914 { ARCH_ID(ARM_ID, 0x1A14), "Cross Trigger Interface (CTI) architecture" },
915 { ARCH_ID(ARM_ID, 0x6A15), "Processor debug architecture (v8.0-A)" },
916 { ARCH_ID(ARM_ID, 0x7A15), "Processor debug architecture (v8.1-A)" },
917 { ARCH_ID(ARM_ID, 0x8A15), "Processor debug architecture (v8.2-A)" },
918 { ARCH_ID(ARM_ID, 0x2A16), "Processor Performance Monitor (PMU) architecture" },
919 { ARCH_ID(ARM_ID, 0x0A17), "Memory Access Port v2 architecture" },
920 { ARCH_ID(ARM_ID, 0x0A27), "JTAG Access Port v2 architecture" },
921 { ARCH_ID(ARM_ID, 0x0A31), "Basic trace router" },
922 { ARCH_ID(ARM_ID, 0x0A37), "Power requestor" },
923 { ARCH_ID(ARM_ID, 0x0A47), "Unknown Access Port v2 architecture" },
924 { ARCH_ID(ARM_ID, 0x0A50), "HSSTP architecture" },
925 { ARCH_ID(ARM_ID, 0x0A63), "System Trace Macrocell (STM) architecture" },
926 { ARCH_ID(ARM_ID, 0x0A75), "CoreSight ELA architecture" },
927 { ARCH_ID(ARM_ID, 0x0AF7), "CoreSight ROM architecture" },
930 #define DEVARCH_ID_MASK (ARM_CS_C9_DEVARCH_ARCHITECT_MASK | ARM_CS_C9_DEVARCH_ARCHID_MASK)
931 #define DEVARCH_ROM_C_0X9 ARCH_ID(ARM_ID, 0x0AF7)
/* Map a DEVARCH register value to a descriptive string: "not present"
 * when the PRESENT bit is clear, otherwise a table lookup on the
 * architect+archid fields. Fallback return is outside this view. */
933 static const char *class0x9_devarch_description(uint32_t devarch)
935 if (!(devarch & ARM_CS_C9_DEVARCH_PRESENT))
936 return "not present";
938 for (unsigned int i = 0; i < ARRAY_SIZE(class0x9_devarch); i++)
939 if ((devarch & DEVARCH_ID_MASK) == class0x9_devarch[i].arch_id)
940 return class0x9_devarch[i].description;
/* Lookup table mapping AP IDR type codes to human-readable names. */
945 static const struct {
947 const char *description;
949 { AP_TYPE_JTAG_AP, "JTAG-AP" },
950 { AP_TYPE_COM_AP, "COM-AP" },
951 { AP_TYPE_AHB3_AP, "MEM-AP AHB3" },
952 { AP_TYPE_APB_AP, "MEM-AP APB2 or APB3" },
953 { AP_TYPE_AXI_AP, "MEM-AP AXI3 or AXI4" },
954 { AP_TYPE_AHB5_AP, "MEM-AP AHB5" },
955 { AP_TYPE_APB4_AP, "MEM-AP APB4" },
956 { AP_TYPE_AXI5_AP, "MEM-AP AXI5" },
957 { AP_TYPE_AHB5H_AP, "MEM-AP AHB5 with enhanced HPROT" },
/* Linear lookup of an AP type's descriptive name in ap_types[];
 * fallback return for unknown types is outside this view. */
960 static const char *ap_type_to_description(enum ap_type type)
962 for (unsigned int i = 0; i < ARRAY_SIZE(ap_types); i++)
963 if (type == ap_types[i].type)
964 return ap_types[i].description;
/*
 * Validate an AP number against the DAP flavor: ADIv5 APSEL is limited to
 * DP_APSEL_MAX, while an ADIv6 AP address must be 4 KiB aligned (low 12
 * bits clear) and fit within the DP's address size (dap->asize).
 */
969 bool is_ap_num_valid(struct adiv5_dap *dap, uint64_t ap_num)
974 /* no autodetection, by now, so uninitialized is equivalent to ADIv5 for
975 * backward compatibility */
976 if (!is_adiv6(dap)) {
977 if (ap_num > DP_APSEL_MAX)
983 if (ap_num & 0x0fffULL)
986 if (ap_num & ((~0ULL) << dap->asize))
995 * This function checks the ID for each access port to find the requested Access Port type
996 * It also calls dap_get_ap() to increment the AP refcount
/*
 * Scan APSEL 0..DP_APSEL_MAX reading each AP's IDR until one matches the
 * requested type; the match is returned through *ap_out with its refcount
 * already incremented by dap_get_ap(). Not supported on ADIv6 (no ROM
 * table scan yet).
 */
998 int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
1000 if (is_adiv6(dap)) {
1001 /* TODO: scan the ROM table and detect the AP available */
1002 LOG_DEBUG("On ADIv6 we cannot scan all the possible AP");
1006 /* Maximum AP number is 255 since the SELECT register is 8 bits */
1007 for (unsigned int ap_num = 0; ap_num <= DP_APSEL_MAX; ap_num++) {
1008 struct adiv5_ap *ap = dap_get_ap(dap, ap_num);
1012 /* read the IDR register of the Access Port */
1013 uint32_t id_val = 0;
1015 int retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &id_val);
1016 if (retval != ERROR_OK) {
1021 retval = dap_run(dap);
1023 /* Reading register for a non-existent AP should not cause an error,
1024 * but just to be sure, try to continue searching if an error does happen.
1026 if (retval == ERROR_OK && (id_val & AP_TYPE_MASK) == type_to_find) {
1027 LOG_DEBUG("Found %s at AP index: %d (IDR=0x%08" PRIX32 ")",
1028 ap_type_to_description(type_to_find),
1037 LOG_DEBUG("No %s found", ap_type_to_description(type_to_find));
/* An AP slot is in use when referenced, or pinned by configuration. */
1041 static inline bool is_ap_in_use(struct adiv5_ap *ap)
1043 return ap->refcount > 0 || ap->config_ap_never_release;
/*
 * Resolve an AP number to a dap->ap[] slot. ADIv5 maps ap_num directly to
 * the slot index; ADIv6 first looks for a slot already bound to this
 * ap_num, then claims the first free slot, failing when all are in use.
 */
1046 static struct adiv5_ap *_dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1048 if (!is_ap_num_valid(dap, ap_num)) {
1049 LOG_ERROR("Invalid AP#0x%" PRIx64, ap_num);
1052 if (is_adiv6(dap)) {
/* Reuse the slot already bound to this ADIv6 AP address, if any. */
1053 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1054 struct adiv5_ap *ap = &dap->ap[i];
1055 if (is_ap_in_use(ap) && ap->ap_num == ap_num) {
/* Otherwise bind the first free slot to it. */
1060 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1061 struct adiv5_ap *ap = &dap->ap[i];
1062 if (!is_ap_in_use(ap)) {
1063 ap->ap_num = ap_num;
1068 LOG_ERROR("No more AP available!");
/* ADIv5: slot index equals the AP number. */
1073 struct adiv5_ap *ap = &dap->ap[ap_num];
1074 ap->ap_num = ap_num;
1079 /* Return AP with specified ap_num. Increment AP refcount */
1080 struct adiv5_ap *dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1082 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
/* NOTE(review): refcount increment not visible in this view; the debug
 * message below reports the updated count. */
1084 LOG_DEBUG("refcount AP#0x%" PRIx64 " get %u", ap_num, ap->refcount);
1088 /* Return AP with specified ap_num. Increment AP refcount and keep it non-zero */
struct adiv5_ap *dap_get_config_ap(struct adiv5_dap *dap, uint64_t ap_num)
1099 /* Decrement AP refcount and release the AP when refcount reaches zero */
1100 int dap_put_ap(struct adiv5_ap *ap)
/* Guard against unbalanced get/put pairs. */
1102 if (ap->refcount == 0) {
1103 LOG_ERROR("BUG: refcount AP#0x%" PRIx64 " put underflow", ap->ap_num);
1109 LOG_DEBUG("refcount AP#0x%" PRIx64 " put %u", ap->ap_num, ap->refcount);
1110 if (!is_ap_in_use(ap)) {
/* Slot released: restore the pristine per-AP defaults. */
1111 /* defaults from dap_instance_init() */
1112 ap->ap_num = DP_APSEL_INVALID;
1113 ap->memaccess_tck = 255;
1114 ap->tar_autoincr_block = (1 << 10);
1115 ap->csw_default = CSW_AHB_DEFAULT;
1116 ap->cfg_reg = MEM_AP_REG_CFG_INVALID;
/*
 * Read the MEM-AP BASE (and BASE64 for 64-bit APs) and IDR registers.
 * All reads are queued and flushed with a single dap_run().
 * @param ap      AP to interrogate; ap->cfg_reg is read lazily and cached.
 * @param dbgbase Out: debug base address (upper half only used on 64-bit AP).
 * @param apid    Out: value of the AP IDR identification register.
 * @return ERROR_OK on success, else a fault code.
 */
1121 static int dap_get_debugbase(struct adiv5_ap *ap,
1122 target_addr_t *dbgbase, uint32_t *apid)
1124 struct adiv5_dap *dap = ap->dap;
1126 uint32_t baseptr_upper, baseptr_lower;
1128 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID) {
1129 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
1130 if (retval != ERROR_OK)
1133 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseptr_lower);
1134 if (retval != ERROR_OK)
1136 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), apid);
1137 if (retval != ERROR_OK)
1139 /* MEM_AP_REG_BASE64 is defined as 'RES0'; can be read and then ignored on 32 bits AP */
1140 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap)) {
1141 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseptr_upper);
1142 if (retval != ERROR_OK)
1146 retval = dap_run(dap);
1147 if (retval != ERROR_OK)
1150 if (!is_64bit_ap(ap))
1152 *dbgbase = (((target_addr_t)baseptr_upper) << 32) | baseptr_lower;
1157 /** Holds registers and coordinates of a CoreSight component */
1158 struct cs_component_vals {
1159 struct adiv5_ap *ap;
/* Base address of the component in the AP's address space. */
1160 target_addr_t component_base;
/* DEVTYPE on Class 0x9 components; same offset holds MEMTYPE on Class 0x1. */
1165 uint32_t devtype_memtype;
1169 * Read the CoreSight registers needed during ROM Table Parsing (RTP).
1171 * @param ap Pointer to AP containing the component.
1172 * @param component_base On MEM-AP access method, base address of the component.
1173 * @param v Pointer to the struct holding the value of registers.
1175 * @return ERROR_OK on success, else a fault code.
/* All register reads below are queued; one dap_run() at the end flushes the
 * whole batch and any error is reported once. */
1177 static int rtp_read_cs_regs(struct adiv5_ap *ap, target_addr_t component_base,
1178 struct cs_component_vals *v)
1180 assert(IS_ALIGNED(component_base, ARM_CS_ALIGN));
1183 uint32_t cid0, cid1, cid2, cid3;
1184 uint32_t pid0, pid1, pid2, pid3, pid4;
1185 int retval = ERROR_OK;
1188 v->component_base = component_base;
1190 /* sort by offset to gain speed */
1193 * Registers DEVARCH, DEVID and DEVTYPE are valid on Class 0x9 devices
1194 * only, but are at offset above 0xf00, so can be read on any device
1195 * without triggering error. Read them for eventual use on Class 0x9.
1197 if (retval == ERROR_OK)
1198 retval = mem_ap_read_u32(ap, component_base + ARM_CS_C9_DEVARCH, &v->devarch);
1200 if (retval == ERROR_OK)
1201 retval = mem_ap_read_u32(ap, component_base + ARM_CS_C9_DEVID, &v->devid);
1203 /* Same address as ARM_CS_C1_MEMTYPE */
1204 if (retval == ERROR_OK)
1205 retval = mem_ap_read_u32(ap, component_base + ARM_CS_C9_DEVTYPE, &v->devtype_memtype);
1207 if (retval == ERROR_OK)
1208 retval = mem_ap_read_u32(ap, component_base + ARM_CS_PIDR4, &pid4);
1210 if (retval == ERROR_OK)
1211 retval = mem_ap_read_u32(ap, component_base + ARM_CS_PIDR0, &pid0);
1212 if (retval == ERROR_OK)
1213 retval = mem_ap_read_u32(ap, component_base + ARM_CS_PIDR1, &pid1);
1214 if (retval == ERROR_OK)
1215 retval = mem_ap_read_u32(ap, component_base + ARM_CS_PIDR2, &pid2);
1216 if (retval == ERROR_OK)
1217 retval = mem_ap_read_u32(ap, component_base + ARM_CS_PIDR3, &pid3);
1219 if (retval == ERROR_OK)
1220 retval = mem_ap_read_u32(ap, component_base + ARM_CS_CIDR0, &cid0);
1221 if (retval == ERROR_OK)
1222 retval = mem_ap_read_u32(ap, component_base + ARM_CS_CIDR1, &cid1);
1223 if (retval == ERROR_OK)
1224 retval = mem_ap_read_u32(ap, component_base + ARM_CS_CIDR2, &cid2);
1225 if (retval == ERROR_OK)
1226 retval = mem_ap_read_u32(ap, component_base + ARM_CS_CIDR3, &cid3);
1228 if (retval == ERROR_OK)
1229 retval = dap_run(ap->dap);
1230 if (retval != ERROR_OK) {
1231 LOG_DEBUG("Failed read CoreSight registers");
/* Pack the byte-wide CIDR/PIDR fragments into the composite 32/40-bit IDs. */
1235 v->cid = (cid3 & 0xff) << 24
1236 | (cid2 & 0xff) << 16
1237 | (cid1 & 0xff) << 8
1239 v->pid = (uint64_t)(pid4 & 0xff) << 32
1240 | (pid3 & 0xff) << 24
1241 | (pid2 & 0xff) << 16
1242 | (pid1 & 0xff) << 8
1248 /* Part number interpretations are from Cortex
1249 * core specs, the CoreSight components TRM
1250 * (ARM DDI 0314H), CoreSight System Design
1251 * Guide (ARM DGI 0012D) and ETM specs; also
1252 * from chip observation (e.g. TI SDTI).
/* Keyed by (JEP106 designer id, PIDR part number); looked up via
 * pidr_to_part_num() for "dap info" output. */
1255 static const struct dap_part_nums {
1256 uint16_t designer_id;
1260 } dap_part_nums[] = {
1261 { ARM_ID, 0x000, "Cortex-M3 SCS", "(System Control Space)", },
1262 { ARM_ID, 0x001, "Cortex-M3 ITM", "(Instrumentation Trace Module)", },
1263 { ARM_ID, 0x002, "Cortex-M3 DWT", "(Data Watchpoint and Trace)", },
1264 { ARM_ID, 0x003, "Cortex-M3 FPB", "(Flash Patch and Breakpoint)", },
1265 { ARM_ID, 0x008, "Cortex-M0 SCS", "(System Control Space)", },
1266 { ARM_ID, 0x00a, "Cortex-M0 DWT", "(Data Watchpoint and Trace)", },
1267 { ARM_ID, 0x00b, "Cortex-M0 BPU", "(Breakpoint Unit)", },
1268 { ARM_ID, 0x00c, "Cortex-M4 SCS", "(System Control Space)", },
1269 { ARM_ID, 0x00d, "CoreSight ETM11", "(Embedded Trace)", },
1270 { ARM_ID, 0x00e, "Cortex-M7 FPB", "(Flash Patch and Breakpoint)", },
1271 { ARM_ID, 0x193, "SoC-600 TSGEN", "(Timestamp Generator)", },
1272 { ARM_ID, 0x470, "Cortex-M1 ROM", "(ROM Table)", },
1273 { ARM_ID, 0x471, "Cortex-M0 ROM", "(ROM Table)", },
1274 { ARM_ID, 0x490, "Cortex-A15 GIC", "(Generic Interrupt Controller)", },
1275 { ARM_ID, 0x492, "Cortex-R52 GICD", "(Distributor)", },
1276 { ARM_ID, 0x493, "Cortex-R52 GICR", "(Redistributor)", },
1277 { ARM_ID, 0x4a1, "Cortex-A53 ROM", "(v8 Memory Map ROM Table)", },
1278 { ARM_ID, 0x4a2, "Cortex-A57 ROM", "(ROM Table)", },
1279 { ARM_ID, 0x4a3, "Cortex-A53 ROM", "(v7 Memory Map ROM Table)", },
1280 { ARM_ID, 0x4a4, "Cortex-A72 ROM", "(ROM Table)", },
1281 { ARM_ID, 0x4a9, "Cortex-A9 ROM", "(ROM Table)", },
1282 { ARM_ID, 0x4aa, "Cortex-A35 ROM", "(v8 Memory Map ROM Table)", },
1283 { ARM_ID, 0x4af, "Cortex-A15 ROM", "(ROM Table)", },
1284 { ARM_ID, 0x4b5, "Cortex-R5 ROM", "(ROM Table)", },
1285 { ARM_ID, 0x4b8, "Cortex-R52 ROM", "(ROM Table)", },
1286 { ARM_ID, 0x4c0, "Cortex-M0+ ROM", "(ROM Table)", },
1287 { ARM_ID, 0x4c3, "Cortex-M3 ROM", "(ROM Table)", },
1288 { ARM_ID, 0x4c4, "Cortex-M4 ROM", "(ROM Table)", },
1289 { ARM_ID, 0x4c7, "Cortex-M7 PPB ROM", "(Private Peripheral Bus ROM Table)", },
1290 { ARM_ID, 0x4c8, "Cortex-M7 ROM", "(ROM Table)", },
1291 { ARM_ID, 0x4e0, "Cortex-A35 ROM", "(v7 Memory Map ROM Table)", },
1292 { ARM_ID, 0x4e4, "Cortex-A76 ROM", "(ROM Table)", },
1293 { ARM_ID, 0x906, "CoreSight CTI", "(Cross Trigger)", },
1294 { ARM_ID, 0x907, "CoreSight ETB", "(Trace Buffer)", },
1295 { ARM_ID, 0x908, "CoreSight CSTF", "(Trace Funnel)", },
1296 { ARM_ID, 0x909, "CoreSight ATBR", "(Advanced Trace Bus Replicator)", },
1297 { ARM_ID, 0x910, "CoreSight ETM9", "(Embedded Trace)", },
1298 { ARM_ID, 0x912, "CoreSight TPIU", "(Trace Port Interface Unit)", },
1299 { ARM_ID, 0x913, "CoreSight ITM", "(Instrumentation Trace Macrocell)", },
1300 { ARM_ID, 0x914, "CoreSight SWO", "(Single Wire Output)", },
1301 { ARM_ID, 0x917, "CoreSight HTM", "(AHB Trace Macrocell)", },
1302 { ARM_ID, 0x920, "CoreSight ETM11", "(Embedded Trace)", },
1303 { ARM_ID, 0x921, "Cortex-A8 ETM", "(Embedded Trace)", },
1304 { ARM_ID, 0x922, "Cortex-A8 CTI", "(Cross Trigger)", },
1305 { ARM_ID, 0x923, "Cortex-M3 TPIU", "(Trace Port Interface Unit)", },
1306 { ARM_ID, 0x924, "Cortex-M3 ETM", "(Embedded Trace)", },
1307 { ARM_ID, 0x925, "Cortex-M4 ETM", "(Embedded Trace)", },
1308 { ARM_ID, 0x930, "Cortex-R4 ETM", "(Embedded Trace)", },
1309 { ARM_ID, 0x931, "Cortex-R5 ETM", "(Embedded Trace)", },
1310 { ARM_ID, 0x932, "CoreSight MTB-M0+", "(Micro Trace Buffer)", },
1311 { ARM_ID, 0x941, "CoreSight TPIU-Lite", "(Trace Port Interface Unit)", },
1312 { ARM_ID, 0x950, "Cortex-A9 PTM", "(Program Trace Macrocell)", },
1313 { ARM_ID, 0x955, "Cortex-A5 ETM", "(Embedded Trace)", },
1314 { ARM_ID, 0x95a, "Cortex-A72 ETM", "(Embedded Trace)", },
1315 { ARM_ID, 0x95b, "Cortex-A17 PTM", "(Program Trace Macrocell)", },
1316 { ARM_ID, 0x95d, "Cortex-A53 ETM", "(Embedded Trace)", },
1317 { ARM_ID, 0x95e, "Cortex-A57 ETM", "(Embedded Trace)", },
1318 { ARM_ID, 0x95f, "Cortex-A15 PTM", "(Program Trace Macrocell)", },
1319 { ARM_ID, 0x961, "CoreSight TMC", "(Trace Memory Controller)", },
1320 { ARM_ID, 0x962, "CoreSight STM", "(System Trace Macrocell)", },
1321 { ARM_ID, 0x975, "Cortex-M7 ETM", "(Embedded Trace)", },
1322 { ARM_ID, 0x9a0, "CoreSight PMU", "(Performance Monitoring Unit)", },
1323 { ARM_ID, 0x9a1, "Cortex-M4 TPIU", "(Trace Port Interface Unit)", },
1324 { ARM_ID, 0x9a4, "CoreSight GPR", "(Granular Power Requester)", },
1325 { ARM_ID, 0x9a5, "Cortex-A5 PMU", "(Performance Monitor Unit)", },
1326 { ARM_ID, 0x9a7, "Cortex-A7 PMU", "(Performance Monitor Unit)", },
1327 { ARM_ID, 0x9a8, "Cortex-A53 CTI", "(Cross Trigger)", },
1328 { ARM_ID, 0x9a9, "Cortex-M7 TPIU", "(Trace Port Interface Unit)", },
1329 { ARM_ID, 0x9ae, "Cortex-A17 PMU", "(Performance Monitor Unit)", },
1330 { ARM_ID, 0x9af, "Cortex-A15 PMU", "(Performance Monitor Unit)", },
1331 { ARM_ID, 0x9b6, "Cortex-R52 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1332 { ARM_ID, 0x9b7, "Cortex-R7 PMU", "(Performance Monitor Unit)", },
1333 { ARM_ID, 0x9d3, "Cortex-A53 PMU", "(Performance Monitor Unit)", },
1334 { ARM_ID, 0x9d7, "Cortex-A57 PMU", "(Performance Monitor Unit)", },
1335 { ARM_ID, 0x9d8, "Cortex-A72 PMU", "(Performance Monitor Unit)", },
1336 { ARM_ID, 0x9da, "Cortex-A35 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1337 { ARM_ID, 0x9e2, "SoC-600 APB-AP", "(APB4 Memory Access Port)", },
1338 { ARM_ID, 0x9e3, "SoC-600 AHB-AP", "(AHB5 Memory Access Port)", },
1339 { ARM_ID, 0x9e4, "SoC-600 AXI-AP", "(AXI Memory Access Port)", },
1340 { ARM_ID, 0x9e5, "SoC-600 APv1 Adapter", "(Access Port v1 Adapter)", },
1341 { ARM_ID, 0x9e6, "SoC-600 JTAG-AP", "(JTAG Access Port)", },
1342 { ARM_ID, 0x9e7, "SoC-600 TPIU", "(Trace Port Interface Unit)", },
1343 { ARM_ID, 0x9e8, "SoC-600 TMC ETR/ETS", "(Embedded Trace Router/Streamer)", },
1344 { ARM_ID, 0x9e9, "SoC-600 TMC ETB", "(Embedded Trace Buffer)", },
1345 { ARM_ID, 0x9ea, "SoC-600 TMC ETF", "(Embedded Trace FIFO)", },
1346 { ARM_ID, 0x9eb, "SoC-600 ATB Funnel", "(Trace Funnel)", },
1347 { ARM_ID, 0x9ec, "SoC-600 ATB Replicator", "(Trace Replicator)", },
1348 { ARM_ID, 0x9ed, "SoC-600 CTI", "(Cross Trigger)", },
1349 { ARM_ID, 0x9ee, "SoC-600 CATU", "(Address Translation Unit)", },
1350 { ARM_ID, 0xc05, "Cortex-A5 Debug", "(Debug Unit)", },
1351 { ARM_ID, 0xc07, "Cortex-A7 Debug", "(Debug Unit)", },
1352 { ARM_ID, 0xc08, "Cortex-A8 Debug", "(Debug Unit)", },
1353 { ARM_ID, 0xc09, "Cortex-A9 Debug", "(Debug Unit)", },
1354 { ARM_ID, 0xc0e, "Cortex-A17 Debug", "(Debug Unit)", },
1355 { ARM_ID, 0xc0f, "Cortex-A15 Debug", "(Debug Unit)", },
1356 { ARM_ID, 0xc14, "Cortex-R4 Debug", "(Debug Unit)", },
1357 { ARM_ID, 0xc15, "Cortex-R5 Debug", "(Debug Unit)", },
1358 { ARM_ID, 0xc17, "Cortex-R7 Debug", "(Debug Unit)", },
1359 { ARM_ID, 0xd03, "Cortex-A53 Debug", "(Debug Unit)", },
1360 { ARM_ID, 0xd04, "Cortex-A35 Debug", "(Debug Unit)", },
1361 { ARM_ID, 0xd07, "Cortex-A57 Debug", "(Debug Unit)", },
1362 { ARM_ID, 0xd08, "Cortex-A72 Debug", "(Debug Unit)", },
1363 { ARM_ID, 0xd0b, "Cortex-A76 Debug", "(Debug Unit)", },
1364 { ARM_ID, 0xd0c, "Neoverse N1", "(Debug Unit)", },
1365 { ARM_ID, 0xd13, "Cortex-R52 Debug", "(Debug Unit)", },
1366 { ARM_ID, 0xd49, "Neoverse N2", "(Debug Unit)", },
1367 { 0x017, 0x120, "TI SDTI", "(System Debug Trace Interface)", }, /* from OMAP3 memmap */
1368 { 0x017, 0x343, "TI DAPCTL", "", }, /* from OMAP3 memmap */
1369 { 0x017, 0x9af, "MSP432 ROM", "(ROM Table)" },
1370 { 0x01f, 0xcd0, "Atmel CPU with DSU", "(CPU)" },
1371 { 0x041, 0x1db, "XMC4500 ROM", "(ROM Table)" },
1372 { 0x041, 0x1df, "XMC4700/4800 ROM", "(ROM Table)" },
1373 { 0x041, 0x1ed, "XMC1000 ROM", "(ROM Table)" },
1374 { 0x065, 0x000, "SHARC+/Blackfin+", "", },
1375 { 0x070, 0x440, "Qualcomm QDSS Component v1", "(Qualcomm Designed CoreSight Component v1)", },
1376 { 0x0bf, 0x100, "Brahma-B53 Debug", "(Debug Unit)", },
1377 { 0x0bf, 0x9d3, "Brahma-B53 PMU", "(Performance Monitor Unit)", },
1378 { 0x0bf, 0x4a1, "Brahma-B53 ROM", "(ROM Table)", },
1379 { 0x0bf, 0x721, "Brahma-B53 ROM", "(ROM Table)", },
1380 { 0x1eb, 0x181, "Tegra 186 ROM", "(ROM Table)", },
1381 { 0x1eb, 0x202, "Denver ETM", "(Denver Embedded Trace)", },
1382 { 0x1eb, 0x211, "Tegra 210 ROM", "(ROM Table)", },
1383 { 0x1eb, 0x302, "Denver Debug", "(Debug Unit)", },
1384 { 0x1eb, 0x402, "Denver PMU", "(Performance Monitor Unit)", },
/* Linear lookup of (designer_id, part_num) in dap_part_nums[]; falls back to
 * a static "Unrecognized" entry (fallback return is elided from this view),
 * so callers never get NULL. */
1387 static const struct dap_part_nums *pidr_to_part_num(unsigned int designer_id, unsigned int part_num)
1389 static const struct dap_part_nums unknown = {
1390 .type = "Unrecognized",
1394 for (unsigned int i = 0; i < ARRAY_SIZE(dap_part_nums); i++)
1395 if (dap_part_nums[i].designer_id == designer_id && dap_part_nums[i].part_num == part_num)
1396 return &dap_part_nums[i];
/*
 * Decode a Class 0x9 DEVTYPE value into human-readable major/sub type
 * strings and print them. Unknown values stay at the "Reserved" defaults.
 * NOTE(review): most case labels and closing braces of the switch are
 * elided from this view; only selected assignments are visible.
 */
1401 static int dap_devtype_display(struct command_invocation *cmd, uint32_t devtype)
1403 const char *major = "Reserved", *subtype = "Reserved";
1404 const unsigned int minor = (devtype & ARM_CS_C9_DEVTYPE_SUB_MASK) >> ARM_CS_C9_DEVTYPE_SUB_SHIFT;
1405 const unsigned int devtype_major = (devtype & ARM_CS_C9_DEVTYPE_MAJOR_MASK) >> ARM_CS_C9_DEVTYPE_MAJOR_SHIFT;
1406 switch (devtype_major) {
1408 major = "Miscellaneous";
1414 subtype = "Validation component";
1419 major = "Trace Sink";
1436 major = "Trace Link";
1442 subtype = "Funnel, router";
1448 subtype = "FIFO, buffer";
1453 major = "Trace Source";
1459 subtype = "Processor";
1465 subtype = "Engine/Coprocessor";
1471 subtype = "Software";
1476 major = "Debug Control";
1482 subtype = "Trigger Matrix";
1485 subtype = "Debug Auth";
1488 subtype = "Power Requestor";
1493 major = "Debug Logic";
1499 subtype = "Processor";
1505 subtype = "Engine/Coprocessor";
1516 major = "Performance Monitor";
1522 subtype = "Processor";
1528 subtype = "Engine/Coprocessor";
1539 command_print(cmd, "\t\tType is 0x%02x, %s, %s",
1540 devtype & ARM_CS_C9_DEVTYPE_MASK,
1546 * Actions/operations to be executed while parsing ROM tables.
/* Callback table: each hook may be NULL (see the rtp_ops_* wrappers). Hooks
 * receive the read error code in @a retval so they can report and decide. */
1550 * Executed at the start of a new MEM-AP, typically to print the MEM-AP header.
1551 * @param retval Error encountered while reading AP.
1552 * @param ap Pointer to AP.
1553 * @param dbgbase Value of MEM-AP Debug Base Address register.
1554 * @param apid Value of MEM-AP IDR Identification Register.
1555 * @param priv Pointer to private data.
1556 * @return ERROR_OK on success, else a fault code.
1558 int (*mem_ap_header)(int retval, struct adiv5_ap *ap, uint64_t dbgbase,
1559 uint32_t apid, void *priv);
1561 * Executed when a CoreSight component is parsed, typically to print
1562 * information on the component.
1563 * @param retval Error encountered while reading component's registers.
1564 * @param v Pointer to a container of the component's registers.
1565 * @param depth The current depth level of ROM table.
1566 * @param priv Pointer to private data.
1567 * @return ERROR_OK on success, else a fault code.
1569 int (*cs_component)(int retval, struct cs_component_vals *v, int depth, void *priv);
1571 * Executed for each entry of a ROM table, typically to print the entry
1572 * and information about validity or end-of-table mark.
1573 * @param retval Error encountered while reading the ROM table entry.
1574 * @param depth The current depth level of ROM table.
1575 * @param offset The offset of the entry in the ROM table.
1576 * @param romentry The value of the ROM table entry.
1577 * @param priv Pointer to private data.
1578 * @return ERROR_OK on success, else a fault code.
1580 int (*rom_table_entry)(int retval, int depth, unsigned int offset, uint64_t romentry,
1589 * Wrapper around struct rtp_ops::mem_ap_header.
1590 * Input parameter @a retval is propagated.
/* NULL hook is a no-op; an incoming error takes precedence over the hook's
 * own return value. */
1592 static int rtp_ops_mem_ap_header(const struct rtp_ops *ops,
1593 int retval, struct adiv5_ap *ap, uint64_t dbgbase, uint32_t apid)
1595 if (!ops->mem_ap_header)
1598 int retval1 = ops->mem_ap_header(retval, ap, dbgbase, apid, ops->priv);
1599 if (retval != ERROR_OK)
1605 * Wrapper around struct rtp_ops::cs_component.
1606 * Input parameter @a retval is propagated.
/* Same NULL-hook / error-precedence contract as rtp_ops_mem_ap_header(). */
1608 static int rtp_ops_cs_component(const struct rtp_ops *ops,
1609 int retval, struct cs_component_vals *v, int depth)
1611 if (!ops->cs_component)
1614 int retval1 = ops->cs_component(retval, v, depth, ops->priv);
1615 if (retval != ERROR_OK)
1621 * Wrapper around struct rtp_ops::rom_table_entry.
1622 * Input parameter @a retval is propagated.
/* Same NULL-hook / error-precedence contract as rtp_ops_mem_ap_header(). */
1624 static int rtp_ops_rom_table_entry(const struct rtp_ops *ops,
1625 int retval, int depth, unsigned int offset, uint64_t romentry)
1627 if (!ops->rom_table_entry)
1630 int retval1 = ops->rom_table_entry(retval, depth, offset, romentry, ops->priv);
1631 if (retval != ERROR_OK)
1636 /* Broken ROM tables can have circular references. Stop after a while */
1637 #define ROM_TABLE_MAX_DEPTH (16)
1640 * Value used only during lookup of a CoreSight component in ROM table.
1641 * Return CORESIGHT_COMPONENT_FOUND when component is found.
1642 * Return ERROR_OK when component is not found yet.
1643 * Return any other ERROR_* in case of error.
1645 #define CORESIGHT_COMPONENT_FOUND (1)
1647 static int rtp_cs_component(const struct rtp_ops *ops,
1648 struct adiv5_ap *ap, target_addr_t dbgbase, int depth);
/*
 * Walk the entries of one ROM table, calling the rom_table_entry hook per
 * entry and recursing into each present component via rtp_cs_component().
 * @param ops         Callback set (and private data) to invoke.
 * @param ap          MEM-AP through which the table is read.
 * @param base_address Base address of the ROM table (ARM_CS_ALIGN aligned).
 * @param depth       Current recursion depth, for the hooks.
 * @param width       Entry width in bits: 32 or 64 (Class 0x9 64-bit format).
 * @param max_entries Maximum number of entries to read.
 * Propagates CORESIGHT_COMPONENT_FOUND from recursion; per-component parse
 * errors are logged and skipped so one bad entry doesn't kill the scan.
 */
1650 static int rtp_rom_loop(const struct rtp_ops *ops,
1651 struct adiv5_ap *ap, target_addr_t base_address, int depth,
1652 unsigned int width, unsigned int max_entries)
1654 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN))
1656 unsigned int offset = 0;
1657 while (max_entries--) {
1659 uint32_t romentry_low, romentry_high;
1660 target_addr_t component_base;
1661 unsigned int saved_offset = offset;
1663 int retval = mem_ap_read_u32(ap, base_address + offset, &romentry_low);
1665 if (retval == ERROR_OK && width == 64) {
1666 retval = mem_ap_read_u32(ap, base_address + offset, &romentry_high);
1669 if (retval == ERROR_OK)
1670 retval = dap_run(ap->dap);
1671 if (retval != ERROR_OK) {
1672 LOG_DEBUG("Failed read ROM table entry");
1677 romentry = (((uint64_t)romentry_high) << 32) | romentry_low;
1678 component_base = base_address +
1679 ((((uint64_t)romentry_high) << 32) | (romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK));
1681 romentry = romentry_low;
1682 /* "romentry" is signed */
1683 component_base = base_address + (int32_t)(romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK);
1684 if (!is_64bit_ap(ap))
1685 component_base = (uint32_t)component_base;
1687 retval = rtp_ops_rom_table_entry(ops, retval, depth, saved_offset, romentry);
1688 if (retval != ERROR_OK)
1691 if (romentry == 0) {
1692 /* End of ROM table */
1696 if (!(romentry & ARM_CS_ROMENTRY_PRESENT))
1700 retval = rtp_cs_component(ops, ap, component_base, depth + 1);
1701 if (retval == CORESIGHT_COMPONENT_FOUND)
1702 return CORESIGHT_COMPONENT_FOUND;
1703 if (retval != ERROR_OK) {
1704 /* TODO: do we need to send an ABORT before continuing? */
1705 LOG_DEBUG("Ignore error parsing CoreSight component")
/*
 * Parse one CoreSight component: read its ID registers, invoke the
 * cs_component hook, and recurse into ROM tables (Class 0x1 legacy tables,
 * or Class 0x9 components whose DEVARCH identifies a ROM table).
 * Read/validation failures return ERROR_OK so the caller's table walk
 * continues; only CORESIGHT_COMPONENT_FOUND short-circuits the recursion.
 * Depth is bounded by ROM_TABLE_MAX_DEPTH against circular references.
 */
1713 static int rtp_cs_component(const struct rtp_ops *ops,
1714 struct adiv5_ap *ap, target_addr_t base_address, int depth)
1716 struct cs_component_vals v;
1719 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1721 if (depth > ROM_TABLE_MAX_DEPTH)
1722 retval = ERROR_FAIL;
1724 retval = rtp_read_cs_regs(ap, base_address, &v);
1726 retval = rtp_ops_cs_component(ops, retval, &v, depth);
1727 if (retval == CORESIGHT_COMPONENT_FOUND)
1728 return CORESIGHT_COMPONENT_FOUND;
1729 if (retval != ERROR_OK)
1730 return ERROR_OK; /* Don't abort recursion */
1732 if (!is_valid_arm_cs_cidr(v.cid))
1733 return ERROR_OK; /* Don't abort recursion */
1735 const unsigned int class = ARM_CS_CIDR_CLASS(v.cid);
1737 if (class == ARM_CS_CLASS_0X1_ROM_TABLE)
1738 return rtp_rom_loop(ops, ap, base_address, depth, 32, 960);
1740 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
1741 if ((v.devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
1744 /* quit if not ROM table */
1745 if ((v.devarch & DEVARCH_ID_MASK) != DEVARCH_ROM_C_0X9)
1748 if ((v.devid & ARM_CS_C9_DEVID_FORMAT_MASK) == ARM_CS_C9_DEVID_FORMAT_64BIT)
1749 return rtp_rom_loop(ops, ap, base_address, depth, 64, 256);
1751 return rtp_rom_loop(ops, ap, base_address, depth, 32, 512);
1754 /* Class other than 0x1 and 0x9 */
/*
 * Entry point of the ROM table parser for one AP: read BASE/IDR, run the
 * mem_ap_header hook, and for a MEM-AP with a valid debug base start the
 * component/ROM-table recursion at the 4 KiB-aligned base.
 * (dbgbase & 0x3) == 0x2 marks "debug entry not present" in the legacy
 * BASE register format, hence the exclusion below.
 */
1758 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap)
1762 target_addr_t dbgbase, invalid_entry;
1764 /* Now we read ROM table ID registers, ref. ARM IHI 0029B sec */
1765 retval = dap_get_debugbase(ap, &dbgbase, &apid);
1766 if (retval != ERROR_OK)
1768 retval = rtp_ops_mem_ap_header(ops, retval, ap, dbgbase, apid);
1769 if (retval != ERROR_OK)
1775 /* NOTE: a MEM-AP may have a single CoreSight component that's
1776 * not a ROM table ... or have no such components at all.
1778 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
1780 if (class == AP_REG_IDR_CLASS_MEM_AP) {
1781 if (is_64bit_ap(ap))
1782 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
1784 invalid_entry = 0xFFFFFFFFul;
1786 if (dbgbase != invalid_entry && (dbgbase & 0x3) != 0x2) {
1787 retval = rtp_cs_component(ops, ap, dbgbase & 0xFFFFFFFFFFFFF000ull, 0);
1788 if (retval == CORESIGHT_COMPONENT_FOUND)
1789 return CORESIGHT_COMPONENT_FOUND;
1796 /* Actions for command "dap info" */
/*
 * rtp_ops::mem_ap_header hook for "dap info": print the AP IDR, its type,
 * and (for a MEM-AP) the BASE address and whether a ROM table is present.
 * @param priv Command invocation used for all command_print() output.
 */
1798 static int dap_info_mem_ap_header(int retval, struct adiv5_ap *ap,
1799 target_addr_t dbgbase, uint32_t apid, void *priv)
1801 struct command_invocation *cmd = priv;
1802 target_addr_t invalid_entry;
1804 if (retval != ERROR_OK) {
1805 command_print(cmd, "\t\tCan't read MEM-AP, the corresponding core might be turned off");
1809 command_print(cmd, "AP ID register 0x%8.8" PRIx32, apid);
1811 command_print(cmd, "No AP found at this AP#0x%" PRIx64, ap->ap_num);
1815 command_print(cmd, "\tType is %s", ap_type_to_description(apid & AP_TYPE_MASK));
1817 /* NOTE: a MEM-AP may have a single CoreSight component that's
1818 * not a ROM table ... or have no such components at all.
1820 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
1822 if (class == AP_REG_IDR_CLASS_MEM_AP) {
1823 if (is_64bit_ap(ap))
1824 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
1826 invalid_entry = 0xFFFFFFFFul;
1828 command_print(cmd, "MEM-AP BASE " TARGET_ADDR_FMT, dbgbase);
1830 if (dbgbase == invalid_entry || (dbgbase & 0x3) == 0x2) {
1831 command_print(cmd, "\tNo ROM table present");
1834 command_print(cmd, "\tValid ROM table present");
1836 command_print(cmd, "\tROM table in legacy format");
/*
 * rtp_ops::cs_component hook for "dap info": decode and print one
 * CoreSight component's CID/PID, designer, part number, class, and the
 * class-specific details (MEMTYPE for Class 0x1, DEVTYPE/DEVARCH for 0x9).
 * @param priv Command invocation used for all command_print() output.
 */
1843 static int dap_info_cs_component(int retval, struct cs_component_vals *v, int depth, void *priv)
1845 struct command_invocation *cmd = priv;
1847 if (depth > ROM_TABLE_MAX_DEPTH) {
1848 command_print(cmd, "\tTables too deep");
1852 command_print(cmd, "\t\tComponent base address " TARGET_ADDR_FMT, v->component_base);
1854 if (retval != ERROR_OK) {
1855 command_print(cmd, "\t\tCan't read component, the corresponding core might be turned off");
1859 if (!is_valid_arm_cs_cidr(v->cid)) {
1860 command_print(cmd, "\t\tInvalid CID 0x%08" PRIx32, v->cid);
1861 return ERROR_OK; /* Don't abort recursion */
1864 /* component may take multiple 4K pages */
1865 uint32_t size = ARM_CS_PIDR_SIZE(v->pid);
1867 command_print(cmd, "\t\tStart address " TARGET_ADDR_FMT, v->component_base - 0x1000 * size);
1869 command_print(cmd, "\t\tPeripheral ID 0x%010" PRIx64, v->pid);
1871 const unsigned int part_num = ARM_CS_PIDR_PART(v->pid);
1872 unsigned int designer_id = ARM_CS_PIDR_DESIGNER(v->pid);
1874 if (v->pid & ARM_CS_PIDR_JEDEC) {
1876 command_print(cmd, "\t\tDesigner is 0x%03x, %s",
1877 designer_id, jep106_manufacturer(designer_id));
1879 /* Legacy ASCII ID, clear invalid bits */
1880 designer_id &= 0x7f;
1881 command_print(cmd, "\t\tDesigner ASCII code 0x%02x, %s",
1882 designer_id, designer_id == 0x41 ? "ARM" : "<unknown>");
1885 const struct dap_part_nums *partnum = pidr_to_part_num(designer_id, part_num);
1886 command_print(cmd, "\t\tPart is 0x%03x, %s %s", part_num, partnum->type, partnum->full);
1888 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
1889 command_print(cmd, "\t\tComponent class is 0x%x, %s", class, class_description[class]);
1891 if (class == ARM_CS_CLASS_0X1_ROM_TABLE) {
1892 if (v->devtype_memtype & ARM_CS_C1_MEMTYPE_SYSMEM_MASK)
1893 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
1895 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
1899 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
1900 dap_devtype_display(cmd, v->devtype_memtype);
1902 /* REVISIT also show ARM_CS_C9_DEVID */
1904 if ((v->devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
1907 unsigned int architect_id = ARM_CS_C9_DEVARCH_ARCHITECT(v->devarch);
1908 unsigned int revision = ARM_CS_C9_DEVARCH_REVISION(v->devarch);
1909 command_print(cmd, "\t\tDev Arch is 0x%08" PRIx32 ", %s \"%s\" rev.%u", v->devarch,
1910 jep106_manufacturer(architect_id), class0x9_devarch_description(v->devarch),
1913 if ((v->devarch & DEVARCH_ID_MASK) == DEVARCH_ROM_C_0X9) {
1914 command_print(cmd, "\t\tType is ROM table");
1916 if (v->devid & ARM_CS_C9_DEVID_SYSMEM_MASK)
1917 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
1919 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
1924 /* Class other than 0x1 and 0x9 */
/*
 * rtp_ops::rom_table_entry hook for "dap info": print one ROM table entry,
 * prefixed with its depth level ("[Lnn] "), and report read errors,
 * end-of-table, and not-present entries.
 * @param priv Command invocation used for all command_print() output.
 */
1928 static int dap_info_rom_table_entry(int retval, int depth,
1929 unsigned int offset, uint64_t romentry, void *priv)
1931 struct command_invocation *cmd = priv;
/* Depth prefix, e.g. "[L02] ", built into a small local buffer. */
1935 snprintf(tabs, sizeof(tabs), "[L%02d] ", depth);
1937 if (retval != ERROR_OK) {
1938 command_print(cmd, "\t%sROMTABLE[0x%x] Read error", tabs, offset);
1939 command_print(cmd, "\t\tUnable to continue");
1940 command_print(cmd, "\t%s\tStop parsing of ROM table", tabs);
1944 command_print(cmd, "\t%sROMTABLE[0x%x] = 0x%08" PRIx64,
1945 tabs, offset, romentry);
1947 if (romentry == 0) {
1948 command_print(cmd, "\t%s\tEnd of ROM table", tabs);
1952 if (!(romentry & ARM_CS_ROMENTRY_PRESENT)) {
1953 command_print(cmd, "\t\tComponent not present");
/* Run the ROM table parser over @a ap with the "dap info" printing hooks. */
1960 int dap_info_command(struct command_invocation *cmd, struct adiv5_ap *ap)
1962 struct rtp_ops dap_info_ops = {
1963 .mem_ap_header = dap_info_mem_ap_header,
1964 .cs_component = dap_info_cs_component,
1965 .rom_table_entry = dap_info_rom_table_entry,
1969 return rtp_ap(&dap_info_ops, ap);
1972 /* Actions for dap_lookup_cs_component() */
/* Lookup criteria and result; passed as rtp_ops private data. */
1974 struct dap_lookup_data {
1979 uint64_t component_base;
/* rtp_ops::cs_component hook: match a Class 0x9 component by DEVTYPE.
 * Returns CORESIGHT_COMPONENT_FOUND (recording component_base) on a hit;
 * otherwise ERROR_OK so the table walk continues.
 * NOTE(review): the core-index/"search for next one" bookkeeping around
 * line 2001 is elided from this view. */
1982 static int dap_lookup_cs_component_cs_component(int retval,
1983 struct cs_component_vals *v, int depth, void *priv)
1985 struct dap_lookup_data *lookup = priv;
1987 if (retval != ERROR_OK)
1990 if (!is_valid_arm_cs_cidr(v->cid))
1993 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
1994 if (class != ARM_CS_CLASS_0X9_CS_COMPONENT)
1997 if ((v->devtype_memtype & ARM_CS_C9_DEVTYPE_MASK) != lookup->type)
2001 /* search for next one */
2007 lookup->component_base = v->component_base;
2008 return CORESIGHT_COMPONENT_FOUND;
/*
 * Search the AP's ROM tables for a CoreSight component of the given DEVTYPE.
 * @param ap      AP whose ROM tables are walked.
 * @param type    DEVTYPE value to match.
 * @param addr    Out: base address of the matching component.
 * @param core_id Index used to select among multiple matches.
 * @return ERROR_OK when found, ERROR_TARGET_RESOURCE_NOT_AVAILABLE when not,
 *         else the fault code from the walk.
 */
2011 int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type,
2012 target_addr_t *addr, int32_t core_id)
2014 struct dap_lookup_data lookup = {
2018 struct rtp_ops dap_lookup_cs_component_ops = {
2019 .mem_ap_header = NULL,
2020 .cs_component = dap_lookup_cs_component_cs_component,
2021 .rom_table_entry = NULL,
2025 int retval = rtp_ap(&dap_lookup_cs_component_ops, ap);
2026 if (retval == CORESIGHT_COMPONENT_FOUND) {
2027 LOG_DEBUG("CS lookup found at 0x%" PRIx64, lookup.component_base);
2028 *addr = lookup.component_base;
2031 if (retval != ERROR_OK) {
2032 LOG_DEBUG("CS lookup error %d", retval);
2035 LOG_DEBUG("CS lookup not found");
2036 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
/* Configuration parameters accepted by adiv5_jim_spot_configure(). */
2039 enum adiv5_cfg_param {
2043 CFG_CTIBASE, /* DEPRECATED */
/* Name/value mapping from Tcl option strings to enum adiv5_cfg_param. */
2046 static const struct jim_nvp nvp_config_opts[] = {
2047 { .name = "-dap", .value = CFG_DAP },
2048 { .name = "-ap-num", .value = CFG_AP_NUM },
2049 { .name = "-baseaddr", .value = CFG_BASEADDR },
2050 { .name = "-ctibase", .value = CFG_CTIBASE }, /* DEPRECATED */
2051 { .name = NULL, .value = -1 }
/*
 * Shared Tcl configure/cget handler for the -dap / -ap-num / -baseaddr
 * (and deprecated -ctibase) options.
 * @param goi      Jim getopt state; goi->isconfigure selects set vs. get.
 * @param dap_p    In/out: DAP binding; once set it cannot be changed.
 * @param ap_num_p In/out: AP number (DP_APSEL_INVALID when unset).
 * @param base_p   In/out: base address; NULL disables the -baseaddr options.
 * @return JIM_OK/JIM_ERR, or JIM_CONTINUE when the option is not ours.
 * NOTE(review): several returns/braces between the visible lines are elided
 * in this view.
 */
2054 static int adiv5_jim_spot_configure(struct jim_getopt_info *goi,
2055 struct adiv5_dap **dap_p, uint64_t *ap_num_p, uint32_t *base_p)
2057 assert(dap_p && ap_num_p);
2062 Jim_SetEmptyResult(goi->interp);
2065 int e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
2068 return JIM_CONTINUE;
2070 /* base_p can be NULL, then '-baseaddr' option is treated as unknown */
2071 if (!base_p && (n->value == CFG_BASEADDR || n->value == CFG_CTIBASE))
2072 return JIM_CONTINUE;
2074 e = jim_getopt_obj(goi, NULL);
2080 if (goi->isconfigure) {
2082 struct adiv5_dap *dap;
2083 e = jim_getopt_obj(goi, &o_t);
2086 dap = dap_instance_by_jim_obj(goi->interp, o_t);
2088 Jim_SetResultString(goi->interp, "DAP name invalid!", -1);
2091 if (*dap_p && *dap_p != dap) {
2092 Jim_SetResultString(goi->interp,
2093 "DAP assignment cannot be changed!", -1);
2101 Jim_SetResultString(goi->interp, "DAP not configured", -1);
2104 Jim_SetResultString(goi->interp, adiv5_dap_name(*dap_p), -1);
2109 if (goi->isconfigure) {
2110 /* jim_wide is a signed 64 bits int, ap_num is unsigned with max 52 bits */
2112 e = jim_getopt_wide(goi, &ap_num);
2115 /* we still don't know dap->adi_version */
2116 if (ap_num < 0 || (ap_num > DP_APSEL_MAX && (ap_num & 0xfff))) {
2117 Jim_SetResultString(goi->interp, "Invalid AP number!", -1);
2124 if (*ap_num_p == DP_APSEL_INVALID) {
2125 Jim_SetResultString(goi->interp, "AP number not configured", -1);
2128 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *ap_num_p));
2133 LOG_WARNING("DEPRECATED! use \'-baseaddr' not \'-ctibase\'");
2136 if (goi->isconfigure) {
2138 e = jim_getopt_wide(goi, &base);
2141 *base_p = (uint32_t)base;
2145 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *base_p));
2153 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "NO PARAMS");
/*
 * Target configure hook: lazily allocate the target's adiv5 private config,
 * parse -dap/-ap-num via adiv5_jim_spot_configure(), and bind the target to
 * the DAP's TAP. -chain-position and -dap are mutually exclusive.
 */
2157 int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
2159 struct adiv5_private_config *pc;
2162 pc = (struct adiv5_private_config *)target->private_config;
2164 pc = calloc(1, sizeof(struct adiv5_private_config));
2166 LOG_ERROR("Out of memory");
2169 pc->ap_num = DP_APSEL_INVALID;
2170 target->private_config = pc;
2173 target->has_dap = true;
2175 e = adiv5_jim_spot_configure(goi, &pc->dap, &pc->ap_num, NULL);
2179 if (pc->dap && !target->dap_configured) {
2180 if (target->tap_configured) {
2182 Jim_SetResultString(goi->interp,
2183 "-chain-position and -dap configparams are mutually exclusive!", -1);
2186 target->tap = pc->dap->tap;
2187 target->dap_configured = true;
2193 int adiv5_verify_config(struct adiv5_private_config *pc)
/* Configure a mem-AP spot (dap, ap_num, base) from Tcl options; thin
 * forwarder to adiv5_jim_spot_configure() with base address enabled. */
2204 int adiv5_jim_mem_ap_spot_configure(struct adiv5_mem_ap_spot *cfg,
2205 struct jim_getopt_info *goi)
2207 return adiv5_jim_spot_configure(goi, &cfg->dap, &cfg->ap_num, &cfg->base);
/* Initialize a mem-AP spot to its unconfigured state. */
2210 int adiv5_mem_ap_spot_init(struct adiv5_mem_ap_spot *p)
2213 p->ap_num = DP_APSEL_INVALID;
/* "dap info [apsel]": validate the optional AP number, acquire the AP
 * (released after use, on lines elided from this view), and run
 * dap_info_command() on it. */
2218 COMMAND_HANDLER(handle_dap_info_command)
2220 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2228 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2229 if (!is_ap_num_valid(dap, apsel)) {
2230 command_print(CMD, "Invalid AP number");
2231 return ERROR_COMMAND_ARGUMENT_INVALID;
2235 return ERROR_COMMAND_SYNTAX_ERROR;
2238 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2240 command_print(CMD, "Cannot get AP");
2244 int retval = dap_info_command(CMD, ap);
/* "dap baseaddr [apsel]": read and print the MEM-AP BASE register
 * (BASE64:BASE on 64-bit APs). CFG is read lazily and cached in the AP. */
2249 COMMAND_HANDLER(dap_baseaddr_command)
2251 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2253 uint32_t baseaddr_lower, baseaddr_upper;
2254 struct adiv5_ap *ap;
2255 target_addr_t baseaddr;
2265 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2266 if (!is_ap_num_valid(dap, apsel)) {
2267 command_print(CMD, "Invalid AP number");
2268 return ERROR_COMMAND_ARGUMENT_INVALID;
2272 return ERROR_COMMAND_SYNTAX_ERROR;
2275 /* NOTE: assumes we're talking to a MEM-AP, which
2276 * has a base address. There are other kinds of AP,
2277 * though they're not common for now. This should
2278 * use the ID register to verify it's a MEM-AP.
2281 ap = dap_get_ap(dap, apsel);
2283 command_print(CMD, "Cannot get AP");
2287 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseaddr_lower);
2289 if (retval == ERROR_OK && ap->cfg_reg == MEM_AP_REG_CFG_INVALID)
2290 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
2292 if (retval == ERROR_OK && (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap))) {
2293 /* MEM_AP_REG_BASE64 is defined as 'RES0'; can be read and then ignored on 32 bits AP */
2294 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseaddr_upper);
2297 if (retval == ERROR_OK)
2298 retval = dap_run(dap);
2300 if (retval != ERROR_OK)
2303 if (is_64bit_ap(ap)) {
2304 baseaddr = (((target_addr_t)baseaddr_upper) << 32) | baseaddr_lower;
2305 command_print(CMD, "0x%016" PRIx64, baseaddr);
2307 command_print(CMD, "0x%08" PRIx32, baseaddr_lower);
/* "dap memaccess [cycles]": get or set the number of extra TCK cycles
 * inserted for MEM-AP memory bus accesses on the currently selected AP.
 * Read path uses dap_get_ap(); write path uses dap_get_config_ap() —
 * presumably the config variant does not require a live AP; verify in
 * the full source.  NOTE(review): argument-count dispatch and error-path
 * if-conditions fall in omitted lines of this excerpt. */
2312 COMMAND_HANDLER(dap_memaccess_command)
2314 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2315 struct adiv5_ap *ap;
2316 uint32_t memaccess_tck;
2320 ap = dap_get_ap(dap, dap->apsel);
2322 command_print(CMD, "Cannot get AP");
/* Snapshot the current value for the final report. */
2325 memaccess_tck = ap->memaccess_tck;
2328 ap = dap_get_config_ap(dap, dap->apsel);
2330 command_print(CMD, "Cannot get AP");
2333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], memaccess_tck);
2334 ap->memaccess_tck = memaccess_tck;
2337 return ERROR_COMMAND_SYNTAX_ERROR;
2342 command_print(CMD, "memory bus access delay set to %" PRIu32 " tck",
/* "dap apsel [ap_num]": select the default AP for subsequent commands,
 * or print the current selection when called without arguments.
 * NOTE(review): the argument-count switch, the assignment of the new
 * apsel value, and the closing lines are omitted from this excerpt. */
2348 COMMAND_HANDLER(dap_apsel_command)
2350 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
/* No argument: just report the currently selected AP. */
2355 command_print(CMD, "0x%" PRIx64, dap->apsel);
2358 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2359 if (!is_ap_num_valid(dap, apsel)) {
2360 command_print(CMD, "Invalid AP number");
2361 return ERROR_COMMAND_ARGUMENT_INVALID;
2365 return ERROR_COMMAND_SYNTAX_ERROR;
/* "dap apcsw [value [mask]]": get or set the default CSW bits applied to
 * MEM-AP accesses on the currently selected AP.
 *   - no args:        print current csw_default
 *   - "default":      reset to CSW_AHB_DEFAULT
 *   - value:          replace csw_default wholesale
 *   - value mask:     update only the bits selected by mask
 * The Size and AddrInc bit-fields are rejected in both forms — presumably
 * because the MEM-AP driver manages them per-transfer; verify in the full
 * source.  NOTE(review): argument-count dispatch and several braces fall
 * in omitted lines of this excerpt. */
2372 COMMAND_HANDLER(dap_apcsw_command)
2374 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2375 struct adiv5_ap *ap;
2376 uint32_t csw_val, csw_mask;
2380 ap = dap_get_ap(dap, dap->apsel);
2382 command_print(CMD, "Cannot get AP");
2385 command_print(CMD, "AP#0x%" PRIx64 " selected, csw 0x%8.8" PRIx32,
2386 dap->apsel, ap->csw_default);
/* One-argument form: keyword "default" or a literal CSW value. */
2389 if (strcmp(CMD_ARGV[0], "default") == 0)
2390 csw_val = CSW_AHB_DEFAULT;
2392 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2394 if (csw_val & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2395 LOG_ERROR("CSW value cannot include 'Size' and 'AddrInc' bit-fields");
2396 return ERROR_COMMAND_ARGUMENT_INVALID;
2398 ap = dap_get_config_ap(dap, dap->apsel);
2400 command_print(CMD, "Cannot get AP");
2403 ap->csw_default = csw_val;
/* Two-argument form: read-modify-write of csw_default under a mask. */
2406 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2407 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], csw_mask);
2408 if (csw_mask & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2409 LOG_ERROR("CSW mask cannot include 'Size' and 'AddrInc' bit-fields");
2410 return ERROR_COMMAND_ARGUMENT_INVALID;
2412 ap = dap_get_config_ap(dap, dap->apsel);
2414 command_print(CMD, "Cannot get AP");
2417 ap->csw_default = (ap->csw_default & ~csw_mask) | (csw_val & csw_mask);
2420 return ERROR_COMMAND_SYNTAX_ERROR;
/* "dap apid [ap_num]": read the IDR (identification) register of the
 * given AP and print it.  NOTE(review): argument-count dispatch, the
 * declarations of apsel/apid/retval, and error-path braces fall in the
 * omitted lines of this excerpt. */
2429 COMMAND_HANDLER(dap_apid_command)
2431 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2441 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2442 if (!is_ap_num_valid(dap, apsel)) {
2443 command_print(CMD, "Invalid AP number");
2444 return ERROR_COMMAND_ARGUMENT_INVALID;
2448 return ERROR_COMMAND_SYNTAX_ERROR;
2451 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2453 command_print(CMD, "Cannot get AP");
/* Queue the IDR read, then flush the queue with dap_run(). */
2456 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &apid);
2457 if (retval != ERROR_OK) {
2461 retval = dap_run(dap);
2463 if (retval != ERROR_OK)
2466 command_print(CMD, "0x%8.8" PRIx32, apid);
/* "dap apreg ap_num reg [value]": raw read or write of an AP register by
 * byte offset.  The offset range depends on the ADI version: ADIv6 APs
 * have a 4 KiB register file, ADIv5 APs 256 bytes; offsets must be
 * word-aligned.  Writes to CSW/TAR/TAR64 also keep the driver's cached
 * copies (ap->csw_value, ap->tar_value, ap->tar_valid) coherent so later
 * queued accesses do not reuse stale state.
 * NOTE(review): else branches and closing braces fall in the omitted
 * lines of this excerpt. */
2471 COMMAND_HANDLER(dap_apreg_command)
2473 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2475 uint32_t reg, value;
2478 if (CMD_ARGC < 2 || CMD_ARGC > 3)
2479 return ERROR_COMMAND_SYNTAX_ERROR;
2481 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2482 if (!is_ap_num_valid(dap, apsel)) {
2483 command_print(CMD, "Invalid AP number");
2484 return ERROR_COMMAND_ARGUMENT_INVALID;
/* Validate the register offset against the ADI-version-specific range. */
2487 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], reg);
2488 if (is_adiv6(dap)) {
2489 if (reg >= 4096 || (reg & 3)) {
2490 command_print(CMD, "Invalid reg value (should be less than 4096 and 4 bytes aligned)");
2491 return ERROR_COMMAND_ARGUMENT_INVALID;
2493 } else { /* ADI version 5 */
2494 if (reg >= 256 || (reg & 3)) {
2495 command_print(CMD, "Invalid reg value (should be less than 256 and 4 bytes aligned)");
2496 return ERROR_COMMAND_ARGUMENT_INVALID;
2500 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2502 command_print(CMD, "Cannot get AP");
/* Three arguments: write path.  CSW/TAR/TAR64 get cache maintenance;
 * any other register is written straight through. */
2506 if (CMD_ARGC == 3) {
2507 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], value);
2508 /* see if user supplied register address is a match for the CSW or TAR register */
2509 if (reg == MEM_AP_REG_CSW(dap)) {
2510 ap->csw_value = 0; /* invalid, in case write fails */
2511 retval = dap_queue_ap_write(ap, reg, value);
2512 if (retval == ERROR_OK)
2513 ap->csw_value = value;
2514 } else if (reg == MEM_AP_REG_TAR(dap)) {
2515 retval = dap_queue_ap_write(ap, reg, value);
2516 if (retval == ERROR_OK)
/* Update only the low 32 bits of the cached TAR. */
2517 ap->tar_value = (ap->tar_value & ~0xFFFFFFFFull) | value;
2519 /* To track independent writes to TAR and TAR64, two tar_valid flags */
2520 /* should be used. To keep it simple, tar_valid is only invalidated on a */
2521 /* write fail. This approach causes a later re-write of the TAR and TAR64 */
2522 /* if tar_valid is false. */
2523 ap->tar_valid = false;
2525 } else if (reg == MEM_AP_REG_TAR64(dap)) {
2526 retval = dap_queue_ap_write(ap, reg, value);
2527 if (retval == ERROR_OK)
/* Update only the high 32 bits of the cached TAR. */
2528 ap->tar_value = (ap->tar_value & 0xFFFFFFFFull) | (((target_addr_t)value) << 32);
2530 /* See above comment for the MEM_AP_REG_TAR failed write case */
2531 ap->tar_valid = false;
2534 retval = dap_queue_ap_write(ap, reg, value);
/* Two arguments: read path (the guarding else is in an omitted line). */
2537 retval = dap_queue_ap_read(ap, reg, &value);
2539 if (retval == ERROR_OK)
2540 retval = dap_run(dap);
2544 if (retval != ERROR_OK)
2548 command_print(CMD, "0x%08" PRIx32, value);
/* "dap dpreg reg [value]": raw read or write of a DP register by byte
 * address (bank << 4 | reg); offsets must be < 256 and word-aligned.
 * With a value argument the register is written, otherwise read and
 * printed.  NOTE(review): else branches and closing braces fall in the
 * omitted lines of this excerpt. */
2553 COMMAND_HANDLER(dap_dpreg_command)
2555 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2556 uint32_t reg, value;
2559 if (CMD_ARGC < 1 || CMD_ARGC > 2)
2560 return ERROR_COMMAND_SYNTAX_ERROR;
2562 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg);
2563 if (reg >= 256 || (reg & 3)) {
2564 command_print(CMD, "Invalid reg value (should be less than 256 and 4 bytes aligned)");
2565 return ERROR_COMMAND_ARGUMENT_INVALID;
2568 if (CMD_ARGC == 2) {
2569 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2570 retval = dap_queue_dp_write(dap, reg, value);
2572 retval = dap_queue_dp_read(dap, reg, &value);
/* Flush the queued DP transaction. */
2574 if (retval == ERROR_OK)
2575 retval = dap_run(dap);
2577 if (retval != ERROR_OK)
2581 command_print(CMD, "0x%08" PRIx32, value);
/* "dap ti_be_32_quirks [enable]": get or set the boolean quirks flag for
 * TI big-endian 32-bit parts (per the registration table below, TI
 * TMS450/TMS570).  Parsing and reporting are delegated to the generic
 * bool-option handler. */
2586 COMMAND_HANDLER(dap_ti_be_32_quirks_command)
2588 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2589 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->ti_be_32_quirks,
2590 "TI BE-32 quirks mode");
/* Per-DAP-instance command registrations ("$dap_name info", "... apsel",
 * etc.), terminated by COMMAND_REGISTRATION_DONE.
 * NOTE(review): the listing is non-contiguous — most entries' .name
 * fields, the entry braces and the closing "};" fall in omitted lines;
 * only the "memaccess" and "ti_be_32_quirks" names are visible here. */
2593 const struct command_registration dap_instance_commands[] = {
2596 .handler = handle_dap_info_command,
2597 .mode = COMMAND_EXEC,
2598 .help = "display ROM table for MEM-AP "
2599 "(default currently selected AP)",
2600 .usage = "[ap_num]",
2604 .handler = dap_apsel_command,
2605 .mode = COMMAND_ANY,
2606 .help = "Set the currently selected AP (default 0) "
2607 "and display the result",
2608 .usage = "[ap_num]",
2612 .handler = dap_apcsw_command,
2613 .mode = COMMAND_ANY,
2614 .help = "Set CSW default bits",
2615 .usage = "[value [mask]]",
2620 .handler = dap_apid_command,
2621 .mode = COMMAND_EXEC,
2622 .help = "return ID register from AP "
2623 "(default currently selected AP)",
2624 .usage = "[ap_num]",
2628 .handler = dap_apreg_command,
2629 .mode = COMMAND_EXEC,
2630 .help = "read/write a register from AP "
2631 "(reg is byte address of a word register, like 0 4 8...)",
2632 .usage = "ap_num reg [value]",
2636 .handler = dap_dpreg_command,
2637 .mode = COMMAND_EXEC,
2638 .help = "read/write a register from DP "
2639 "(reg is byte address (bank << 4 | reg) of a word register, like 0 4 8...)",
2640 .usage = "reg [value]",
2644 .handler = dap_baseaddr_command,
2645 .mode = COMMAND_EXEC,
2646 .help = "return debug base address from MEM-AP "
2647 "(default currently selected AP)",
2648 .usage = "[ap_num]",
2651 .name = "memaccess",
2652 .handler = dap_memaccess_command,
2653 .mode = COMMAND_EXEC,
2654 .help = "set/get number of extra tck for MEM-AP memory "
2655 "bus access [0-255]",
2656 .usage = "[cycles]",
2659 .name = "ti_be_32_quirks",
2660 .handler = dap_ti_be_32_quirks_command,
2661 .mode = COMMAND_CONFIG,
2662 .help = "set/get quirks mode for TI TMS450/TMS570 processors",
2663 .usage = "[enable]",
2665 COMMAND_REGISTRATION_DONE