#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
+#include "armv8_cache.h"
#include <helper/time_support.h>
static int aarch64_poll(struct target *target);
return retval;
break;
default:
- LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
+ retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
+ if (retval != ERROR_OK)
+ return retval;
+ break;
}
}
return retval;
LOG_DEBUG(" ");
- /* Unlocking the debug registers for modification
- * The debugport might be uninitialised so try twice */
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
- if (retval != ERROR_OK) {
- /* try again */
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
- if (retval == ERROR_OK)
- LOG_USER("Locking debug access failed on first, but succeeded on second try.");
- }
- if (retval != ERROR_OK)
- return retval;
/* Clear Sticky Power Down status Bit in PRSR to enable access to
the registers in the Core Power Domain */
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
if (retval != ERROR_OK)
return retval;
- /* Enabling of instruction execution in debug mode is done in debug_entry code */
+ /*
+ * Static CTI configuration:
+ * Channel 0 -> trigger outputs HALT request to PE
+ * Channel 1 -> trigger outputs Resume request to PE
+ * Gate all channel trigger events from entering the CTM
+ */
+
+ /* Enable CTI */
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->cti_base + CTI_CTR, 1);
+ /* By default, gate all channel triggers to and from the CTM */
+ if (retval == ERROR_OK)
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->cti_base + CTI_GATE, 0);
+ /* output halt requests to PE on channel 0 trigger */
+ if (retval == ERROR_OK)
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
+ /* output restart requests to PE on channel 1 trigger */
+ if (retval == ERROR_OK)
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
+ if (retval != ERROR_OK)
+ return retval;
/* Resync breakpoint registers */
uint32_t opcode, uint32_t data)
{
struct aarch64_common *a8 = dpm_to_a8(dpm);
+
uint32_t dscr = DSCR_ITE;
int retval;
return retval;
retval = aarch64_exec_opcode(
- a8->armv8_common.arm.target,
- ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
- &dscr);
+ a8->armv8_common.arm.target, armv8_opcode(&a8->armv8_common, READ_REG_DTRRX), &dscr);
if (retval != ERROR_OK)
return retval;
+/*
+ * Issue a DSB SY barrier after the DPM has modified the execution state
+ * in CPSR, so the new state takes effect before further instructions run.
+ */
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
+	struct armv8_common *armv8 = target_to_armv8(target);
	uint32_t dscr = DSCR_ITE;
	/* "Prefetch flush" after modifying execution status in CPSR */
-	return aarch64_exec_opcode(target,
-		DSB_SY,
-		&dscr);
+	return aarch64_exec_opcode(target, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dscr);
}
static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
/* write R0 to DCC */
retval = aarch64_exec_opcode(
- a8->armv8_common.arm.target,
- ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0), /* msr dbgdtr_el0, x0 */
- &dscr);
+ a8->armv8_common.arm.target, armv8_opcode(&a8->armv8_common, WRITE_REG_DTRTX), &dscr);
if (retval != ERROR_OK)
return retval;
+/*
+ * Halt all cores of an SMP group: open the CTI channel 0 gate on every
+ * SMP member so a single halt request is distributed via the CTM, then
+ * halt the requesting PE itself.
+ */
static int aarch64_halt_smp(struct target *target)
{
-	int retval = 0;
-	struct target_list *head;
-	struct target *curr;
-	head = target->head;
+	int retval = ERROR_OK;
+	struct target_list *head = target->head;
+
	while (head != (struct target_list *)NULL) {
-		curr = head->target;
-		if ((curr != target) && (curr->state != TARGET_HALTED))
-			retval += aarch64_halt(curr);
+		struct target *curr = head->target;
+		struct armv8_common *armv8 = target_to_armv8(curr);
+
+		/* open the gate for channel 0 to let HALT requests pass to the CTM */
+		if (curr->smp)
+			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+					armv8->cti_base + CTI_GATE, CTI_CHNL(0));
+		if (retval != ERROR_OK)
+			break;
+
		head = head->next;
	}
+
+	/* halt the target PE */
+	if (retval == ERROR_OK)
+		retval = aarch64_halt(target);
+
	return retval;
}
uint32_t dscr;
struct armv8_common *armv8 = target_to_armv8(target);
- /* enable CTI*/
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_CTR, 1);
- if (retval != ERROR_OK)
- return retval;
-
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_GATE, 3);
- if (retval != ERROR_OK)
- return retval;
-
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_OUTEN0, 1);
- if (retval != ERROR_OK)
- return retval;
-
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_OUTEN1, 2);
- if (retval != ERROR_OK)
- return retval;
-
/*
* add HDE in halting debug mode
*/
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
+ if (retval == ERROR_OK)
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
if (retval != ERROR_OK)
return retval;
+ /* trigger an event on channel 0, this outputs a halt request to the PE */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
- if (retval != ERROR_OK)
- return retval;
-
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_APPPULSE, 1);
- if (retval != ERROR_OK)
- return retval;
-
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_INACK, 1);
+ armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
if (retval != ERROR_OK)
return retval;
-
long long then = timeval_ms();
for (;; ) {
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
/* registers are now invalid */
register_cache_invalidate(arm->core_cache);
-#if 0
- /* the front-end may request us not to handle breakpoints */
- if (handle_breakpoints) {
- /* Single step past breakpoint at current address */
- breakpoint = breakpoint_find(target, resume_pc);
- if (breakpoint) {
- LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
- cortex_m3_unset_breakpoint(target, breakpoint);
- cortex_m3_single_step_core(target);
- cortex_m3_set_breakpoint(target, breakpoint);
- }
- }
-#endif
-
return retval;
}
-static int aarch64_internal_restart(struct target *target)
+static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
struct armv8_common *armv8 = target_to_armv8(target);
struct arm *arm = &armv8->arm;
if ((dscr & DSCR_ITE) == 0)
LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
+ /* make sure to acknowledge the halt event before resuming */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_APPPULSE, 2);
+ armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));
+
+ /*
+ * open the CTI gate for channel 1 so that the restart events
+ * get passed along to all PEs
+ */
+ if (retval == ERROR_OK)
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->cti_base + CTI_GATE, CTI_CHNL(1));
if (retval != ERROR_OK)
return retval;
- long long then = timeval_ms();
- for (;; ) {
- retval = mem_ap_read_atomic_u32(armv8->debug_ap,
- armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
+ if (!slave_pe) {
+ /* trigger an event on channel 1, generates a restart request to the PE */
+ retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+ armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
if (retval != ERROR_OK)
return retval;
- if ((dscr & DSCR_HDE) != 0)
- break;
- if (timeval_ms() > then + 1000) {
- LOG_ERROR("Timeout waiting for resume");
- return ERROR_FAIL;
+
+ long long then = timeval_ms();
+ for (;; ) {
+ retval = mem_ap_read_atomic_u32(armv8->debug_ap,
+ armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
+ if (retval != ERROR_OK)
+ return retval;
+ if ((dscr & DSCR_HDE) != 0)
+ break;
+ if (timeval_ms() > then + 1000) {
+ LOG_ERROR("Timeout waiting for resume");
+ return ERROR_FAIL;
+ }
}
}
/* resume current address , not in step mode */
retval += aarch64_internal_restore(curr, 1, &address,
handle_breakpoints, 0);
- retval += aarch64_internal_restart(curr);
+ retval += aarch64_internal_restart(curr, true);
}
head = head->next;
if (retval != ERROR_OK)
return retval;
}
- aarch64_internal_restart(target);
+ aarch64_internal_restart(target, false);
if (!debug_execution) {
target->state = TARGET_RUNNING;
struct armv8_common *armv8 = &aarch64->armv8_common;
int retval;
+ /* clear sticky errors */
mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
+ armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
+
switch (armv8->arm.core_mode) {
case ARMV8_64_EL0T:
case ARMV8_64_EL1T:
return retval;
break;
default:
- LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
+ retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
+ if (retval != ERROR_OK)
+ return retval;
+ break;
}
+
LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
aarch64->system_control_reg_curr = aarch64->system_control_reg;
return ERROR_OK;
}
+/*
+ * Read-modify-write of DSCR: clear the bits selected by 'bit_mask' and
+ * set those bits of 'value' that fall within 'bit_mask'.
+ */
+static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
+{
+	struct armv8_common *armv8 = target_to_armv8(target);
+	uint32_t dscr;
+
+	/* Read DSCR */
+	int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
+			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
+	if (retval != ERROR_OK)
+		return retval;
+
+	/* clear bitfield */
+	dscr &= ~bit_mask;
+	/* put new value */
+	dscr |= value & bit_mask;
+
+	/* write new DSCR */
+	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
+			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
+	return retval;
+}
+
+/*
+ * Single-step the core using the EDECR.SS halting-step mechanism, with
+ * interrupts masked via DSCR (bits 23:22) for the duration of the step.
+ */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
-	uint32_t tmp;
+	uint32_t edecr;
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
-			armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
+			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	if (retval != ERROR_OK)
		return retval;
+	/* make sure EDECR.SS is not set when restoring the register */
+	edecr &= ~0x4;
+
+	/* set EDECR.SS to enter hardware step mode */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
-			armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
+			armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	if (retval != ERROR_OK)
		return retval;
-	target->debug_reason = DBG_REASON_SINGLESTEP;
-	retval = aarch64_resume(target, 1, address, 0, 0);
+	/* disable interrupts while stepping */
+	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
+	if (retval != ERROR_OK)
+		return retval;
+
+	/* resume the target */
+	retval = aarch64_resume(target, current, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;
	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
-		mem_ap_read_atomic_u32(armv8->debug_ap,
-			armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
-		LOG_DEBUG("DESR = %#x", tmp);
		retval = aarch64_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}
}
+	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
-		armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
+		armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;
-	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
-	if (target->state == TARGET_HALTED)
-		LOG_DEBUG("target stepped");
+	/* restore interrupts */
+	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
+	if (retval != ERROR_OK)
+		return retval;
	return ERROR_OK;
}
} else if (breakpoint->type == BKPT_SOFT) {
uint8_t code[4];
- buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
+
+ buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
retval = target_read_memory(target,
breakpoint->address & 0xFFFFFFFFFFFFFFFE,
breakpoint->length, 1,
breakpoint->orig_instr);
if (retval != ERROR_OK)
return retval;
+
+ armv8_cache_d_inner_flush_virt(armv8,
+ breakpoint->address & 0xFFFFFFFFFFFFFFFE,
+ breakpoint->length);
+
retval = target_write_memory(target,
breakpoint->address & 0xFFFFFFFFFFFFFFFE,
breakpoint->length, 1, code);
if (retval != ERROR_OK)
return retval;
+
+ armv8_cache_d_inner_flush_virt(armv8,
+ breakpoint->address & 0xFFFFFFFFFFFFFFFE,
+ breakpoint->length);
+
+ armv8_cache_i_inner_inval_virt(armv8,
+ breakpoint->address & 0xFFFFFFFFFFFFFFFE,
+ breakpoint->length);
+
breakpoint->set = 0x11; /* Any nice value but 0 */
}
}
} else {
/* restore original instruction (kept in target endianness) */
+
+ armv8_cache_d_inner_flush_virt(armv8,
+ breakpoint->address & 0xFFFFFFFFFFFFFFFE,
+ breakpoint->length);
+
if (breakpoint->length == 4) {
retval = target_write_memory(target,
breakpoint->address & 0xFFFFFFFFFFFFFFFE,
if (retval != ERROR_OK)
return retval;
}
+
+ armv8_cache_d_inner_flush_virt(armv8,
+ breakpoint->address & 0xFFFFFFFFFFFFFFFE,
+ breakpoint->length);
+
+ armv8_cache_i_inner_inval_virt(armv8,
+ breakpoint->address & 0xFFFFFFFFFFFFFFFE,
+ breakpoint->length);
}
breakpoint->set = 0;
target_addr_t address, uint32_t size,
uint32_t count, uint8_t *buffer)
{
- struct armv8_common *armv8 = target_to_armv8(target);
int retval = ERROR_COMMAND_SYNTAX_ERROR;
- struct adiv5_dap *swjdp = armv8->arm.dap;
- uint8_t apsel = swjdp->apsel;
LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
address, size, count);
if (count && buffer) {
-
- if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
-
- /* read memory through AHB-AP */
- retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
- } else {
- /* read memory through APB-AP */
- retval = aarch64_mmu_modify(target, 0);
- if (retval != ERROR_OK)
- return retval;
- retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
- }
+ /* read memory through APB-AP */
+ retval = aarch64_mmu_modify(target, 0);
+ if (retval != ERROR_OK)
+ return retval;
+ retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
}
return retval;
}
uint32_t size, uint32_t count, uint8_t *buffer)
{
int mmu_enabled = 0;
- target_addr_t virt, phys;
int retval;
- struct armv8_common *armv8 = target_to_armv8(target);
- struct adiv5_dap *swjdp = armv8->arm.dap;
- uint8_t apsel = swjdp->apsel;
/* aarch64 handles unaligned memory access */
LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
if (retval != ERROR_OK)
return retval;
- if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
- if (mmu_enabled) {
- virt = address;
- retval = aarch64_virt2phys(target, virt, &phys);
- if (retval != ERROR_OK)
- return retval;
-
- LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
- virt, phys);
- address = phys;
- }
- retval = aarch64_read_phys_memory(target, address, size, count,
- buffer);
- } else {
- if (mmu_enabled) {
- retval = aarch64_check_address(target, address);
- if (retval != ERROR_OK)
- return retval;
- /* enable MMU as we could have disabled it for phys
- access */
- retval = aarch64_mmu_modify(target, 1);
- if (retval != ERROR_OK)
- return retval;
- }
- retval = aarch64_read_apb_ap_memory(target, address, size,
- count, buffer);
+ if (mmu_enabled) {
+ retval = aarch64_check_address(target, address);
+ if (retval != ERROR_OK)
+ return retval;
+ /* enable MMU as we could have disabled it for phys access */
+ retval = aarch64_mmu_modify(target, 1);
+ if (retval != ERROR_OK)
+ return retval;
}
- return retval;
+ return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
}
static int aarch64_write_phys_memory(struct target *target,
target_addr_t address, uint32_t size,
uint32_t count, const uint8_t *buffer)
{
- struct armv8_common *armv8 = target_to_armv8(target);
- struct adiv5_dap *swjdp = armv8->arm.dap;
int retval = ERROR_COMMAND_SYNTAX_ERROR;
- uint8_t apsel = swjdp->apsel;
LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
size, count);
if (count && buffer) {
-
- if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
-
- /* write memory through AHB-AP */
- retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
- } else {
-
- /* write memory through APB-AP */
- retval = aarch64_mmu_modify(target, 0);
- if (retval != ERROR_OK)
- return retval;
- return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
- }
- }
-
- /* REVISIT this op is generic ARMv7-A/R stuff */
- if (retval == ERROR_OK && target->state == TARGET_HALTED) {
- struct arm_dpm *dpm = armv8->arm.dpm;
-
- retval = dpm->prepare(dpm);
+ /* write memory through APB-AP */
+ retval = aarch64_mmu_modify(target, 0);
if (retval != ERROR_OK)
return retval;
-
- /* The Cache handling will NOT work with MMU active, the
- * wrong addresses will be invalidated!
- *
- * For both ICache and DCache, walk all cache lines in the
- * address range. Cortex-A has fixed 64 byte line length.
- *
- * REVISIT per ARMv7, these may trigger watchpoints ...
- */
-
- /* invalidate I-Cache */
- if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
- /* ICIMVAU - Invalidate Cache single entry
- * with MVA to PoU
- * MCR p15, 0, r0, c7, c5, 1
- */
- for (uint32_t cacheline = 0;
- cacheline < size * count;
- cacheline += 64) {
- retval = dpm->instr_write_data_r0(dpm,
- ARMV8_MSR_GP(SYSTEM_ICIVAU, 0),
- address + cacheline);
- if (retval != ERROR_OK)
- return retval;
- }
- }
-
- /* invalidate D-Cache */
- if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
- /* DCIMVAC - Invalidate data Cache line
- * with MVA to PoC
- * MCR p15, 0, r0, c7, c6, 1
- */
- for (uint32_t cacheline = 0;
- cacheline < size * count;
- cacheline += 64) {
- retval = dpm->instr_write_data_r0(dpm,
- ARMV8_MSR_GP(SYSTEM_DCCVAU, 0),
- address + cacheline);
- if (retval != ERROR_OK)
- return retval;
- }
- }
-
- /* (void) */ dpm->finish(dpm);
+ return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
}
return retval;
uint32_t size, uint32_t count, const uint8_t *buffer)
{
int mmu_enabled = 0;
- target_addr_t virt, phys;
int retval;
- struct armv8_common *armv8 = target_to_armv8(target);
- struct adiv5_dap *swjdp = armv8->arm.dap;
- uint8_t apsel = swjdp->apsel;
/* aarch64 handles unaligned memory access */
LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
if (retval != ERROR_OK)
return retval;
- if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
- LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
- PRId32 "; count %" PRId32, address, size, count);
- if (mmu_enabled) {
- virt = address;
- retval = aarch64_virt2phys(target, virt, &phys);
- if (retval != ERROR_OK)
- return retval;
-
- LOG_DEBUG("Writing to virtual address. Translating v:0x%"
- TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
- address = phys;
- }
- retval = aarch64_write_phys_memory(target, address, size,
- count, buffer);
- } else {
- if (mmu_enabled) {
- retval = aarch64_check_address(target, address);
- if (retval != ERROR_OK)
- return retval;
- /* enable MMU as we could have disabled it for phys access */
- retval = aarch64_mmu_modify(target, 1);
- if (retval != ERROR_OK)
- return retval;
- }
- retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
+ if (mmu_enabled) {
+ retval = aarch64_check_address(target, address);
+ if (retval != ERROR_OK)
+ return retval;
+ /* enable MMU as we could have disabled it for phys access */
+ retval = aarch64_mmu_modify(target, 1);
+ if (retval != ERROR_OK)
+ return retval;
}
- return retval;
+ return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
}
static int aarch64_handle_target_request(void *priv)
armv8->debug_ap->memaccess_tck = 80;
- /* Search for the AHB-AB */
- armv8->memory_ap_available = false;
- retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
- if (retval == ERROR_OK) {
- retval = mem_ap_init(armv8->memory_ap);
- if (retval == ERROR_OK)
- armv8->memory_ap_available = true;
- }
- if (retval != ERROR_OK) {
- /* AHB-AP not found or unavailable - use the CPU */
- LOG_DEBUG("No AHB-AP available for memory access");
- }
-
-
if (!target->dbgbase_set) {
uint32_t dbgbase;
/* Get ROM Table base */
} else
armv8->cti_base = target->ctibase;
- retval = mem_ap_write_atomic_u32(armv8->debug_ap,
- armv8->cti_base + CTI_UNLOCK , 0xC5ACCE55);
- if (retval != ERROR_OK)
- return retval;
-
-
armv8->arm.core_type = ARM_MODE_MON;
retval = aarch64_dpm_setup(aarch64, debug);
if (retval != ERROR_OK)
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
-	int retval = ERROR_FAIL;
-	struct armv8_common *armv8 = target_to_armv8(target);
-	struct adiv5_dap *swjdp = armv8->arm.dap;
-	uint8_t apsel = swjdp->apsel;
-	if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
-		uint32_t ret;
-		retval = armv8_mmu_translate_va(target,
-			virt, &ret);
-		if (retval != ERROR_OK)
-			goto done;
-		*phys = ret;
-	} else {
-		LOG_ERROR("AAR64 processor not support translate va to pa");
-	}
-done:
-	return retval;
+	/* delegate VA -> PA translation to the common ARMv8 MMU code */
+	return armv8_mmu_translate_va(target, virt, phys);
}
COMMAND_HANDLER(aarch64_handle_cache_info_command)