1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
30 #include "breakpoints.h"
32 #include "target_type.h"
34 #include "arm_simulator.h"
35 #include "arm_disassembler.h"
36 #include <helper/time_support.h>
39 #include "arm_opcodes.h"
44 * Important XScale documents available as of October 2009 include:
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
59 * Chip-specific microarchitecture documents may also be useful.
63 /* forward declarations */
/* Prototypes for helpers defined later in this file; needed because
 * several are mutually referenced (e.g. xscale_resume() and
 * xscale_debug_entry() call each other). */
64 static int xscale_resume(struct target *, int current,
65 uint32_t address, int handle_breakpoints, int debug_execution);
66 static int xscale_debug_entry(struct target *);
67 static int xscale_restore_banked(struct target *);
68 static int xscale_get_reg(struct reg *reg);
69 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
70 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
71 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
72 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
73 static int xscale_read_trace(struct target *);
76 /* This XScale "debug handler" is loaded into the processor's
77 * mini-ICache, which is 2K of code writable only via JTAG.
79 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
80 * binary files cleanly. It's string oriented, and terminates them
81 * with a NUL character. Better would be to generate the constants
82 * and let other code decide names, scoping, and other housekeeping.
84 static /* unsigned const char xscale_debug_handler[] = ... */
/* The generated header below supplies the array initializer that the
 * bare 'static' above introduces (see the FIXME for why). */
85 #include "xscale_debug.h"
87 static char *const xscale_reg_list[] =
/* Human-readable register names; index order matches
 * xscale_reg_arch_info[] below (the trailing comments give the index).
 * NOTE(review): most initializer entries are not visible in this
 * listing (only indices 0, 10 and 20 appear) -- confirm against the
 * complete source before editing. */
89 "XSCALE_MAINID", /* 0 */
99 "XSCALE_IBCR0", /* 10 */
109 "XSCALE_RX", /* 20 */
113 static const struct xscale_reg xscale_reg_arch_info[] =
/* Per-register architecture info, in the same order as xscale_reg_list.
 * Per the inline comments, entries with a -1 first field are registers
 * reached only through JTAG rather than by a coprocessor number.
 * NOTE(review): some initializer entries appear elided from this
 * listing -- confirm entry count against xscale_reg_list. */
115 {XSCALE_MAINID, NULL},
116 {XSCALE_CACHETYPE, NULL},
118 {XSCALE_AUXCTRL, NULL},
124 {XSCALE_CPACCESS, NULL},
125 {XSCALE_IBCR0, NULL},
126 {XSCALE_IBCR1, NULL},
129 {XSCALE_DBCON, NULL},
130 {XSCALE_TBREG, NULL},
131 {XSCALE_CHKPT0, NULL},
132 {XSCALE_CHKPT1, NULL},
133 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
134 {-1, NULL}, /* TX accessed via JTAG */
135 {-1, NULL}, /* RX accessed via JTAG */
136 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
/* Convenience wrapper to write an XScale-specific register from a plain
 * uint32_t: packs the value into a little-endian byte buffer and hands
 * it to xscale_set_reg().
 *
 * Returns the result of xscale_set_reg().
 */
static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
{
	uint8_t buf[4];	/* was used undeclared in the broken listing */

	buf_set_u32(buf, 0, 32, value);

	return xscale_set_reg(reg, buf);
}
/* Shared error message printed by xscale_verify_pointer(). */
149 static const char xscale_not[] = "target is not an XScale";
151 static int xscale_verify_pointer(struct command_context *cmd_ctx,
152 struct xscale_common *xscale)
154 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
155 command_print(cmd_ctx, xscale_not);
156 return ERROR_TARGET_INVALID;
161 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
166 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
168 struct scan_field field;
171 memset(&field, 0, sizeof field);
172 field.num_bits = tap->ir_length;
173 field.out_value = scratch;
174 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
176 jtag_add_ir_scan(tap, &field, end_state);
/* Read the DCSR (Debug Control/Status Register) over JTAG via the
 * SELDCSR instruction and cache it; the same scan drives out the
 * hold_rst and external_debug_break request bits in field0, then a
 * second scan writes the value back.
 * NOTE(review): declarations of 'retval', 'tmp' and 'tmp2' are not
 * visible in this listing -- lines appear elided. */
182 static int xscale_read_dcsr(struct target *target)
184 struct xscale_common *xscale = target_to_xscale(target);
186 struct scan_field fields[3];
187 uint8_t field0 = 0x0;
188 uint8_t field0_check_value = 0x2;
189 uint8_t field0_check_mask = 0x7;
190 uint8_t field2 = 0x0;
191 uint8_t field2_check_value = 0x0;
192 uint8_t field2_check_mask = 0x1;
194 jtag_set_end_state(TAP_DRPAUSE);
195 xscale_jtag_set_instr(target->tap,
196 XSCALE_SELDCSR << xscale->xscale_variant,
/* request bits go out in the 3-bit field alongside the DCSR scan */
199 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
200 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
202 memset(&fields, 0, sizeof fields);
204 fields[0].num_bits = 3;
205 fields[0].out_value = &field0;
207 fields[0].in_value = &tmp;
209 fields[1].num_bits = 32;
/* capture the 32-bit DCSR value straight into the register cache */
210 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
212 fields[2].num_bits = 1;
213 fields[2].out_value = &field2;
215 fields[2].in_value = &tmp2;
217 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
219 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
220 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
222 if ((retval = jtag_execute_queue()) != ERROR_OK)
224 LOG_ERROR("JTAG error while reading DCSR");
/* freshly read: cache entry is now clean and valid */
228 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
229 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
231 /* write the register with the value we just read
232 * on this second pass, only the first bit of field0 is guaranteed to be 0)
234 field0_check_mask = 0x1;
235 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
236 fields[1].in_value = NULL;
238 jtag_set_end_state(TAP_IDLE);
240 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
242 /* DANGER!!! this must be here. It will make sure that the arguments
243 * to jtag_set_check_value() does not go out of scope! */
244 return jtag_execute_queue();
248 static void xscale_getbuf(jtag_callback_data_t arg)
250 uint8_t *in = (uint8_t *)arg;
251 *((uint32_t *)in) = buf_get_u32(in, 0, 32);
/* Read 'num_words' 32-bit words that the on-chip debug handler makes
 * available through the DBGTX JTAG register.  Each scan also returns a
 * 3-bit status field; bit 0 of it flags whether the word was valid.
 * Invalid words are compacted out and re-tried (up to 1000 passes).
 * NOTE(review): declarations of 'path', 'i', 'j', 'attempts' and
 * 'words_done', plus the trailing free()/return, are not visible in
 * this listing -- lines appear elided. */
254 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
257 return ERROR_INVALID_ARGUMENTS;
259 struct xscale_common *xscale = target_to_xscale(target);
260 int retval = ERROR_OK;
262 struct scan_field fields[3];
263 uint8_t *field0 = malloc(num_words * 1);
264 uint8_t field0_check_value = 0x2;
265 uint8_t field0_check_mask = 0x6;
266 uint32_t *field1 = malloc(num_words * 4);
267 uint8_t field2_check_value = 0x0;
268 uint8_t field2_check_mask = 0x1;
270 int words_scheduled = 0;
/* go via Capture-DR so TX_READY is sampled for every word */
273 path[0] = TAP_DRSELECT;
274 path[1] = TAP_DRCAPTURE;
275 path[2] = TAP_DRSHIFT;
277 memset(&fields, 0, sizeof fields);
279 fields[0].num_bits = 3;
280 fields[0].check_value = &field0_check_value;
281 fields[0].check_mask = &field0_check_mask;
283 fields[1].num_bits = 32;
285 fields[2].num_bits = 1;
286 fields[2].check_value = &field2_check_value;
287 fields[2].check_mask = &field2_check_mask;
289 jtag_set_end_state(TAP_IDLE);
290 xscale_jtag_set_instr(target->tap,
291 XSCALE_DBGTX << xscale->xscale_variant,
293 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state as the above could be a no-op */
295 /* repeat until all words have been collected */
297 while (words_done < num_words)
301 for (i = words_done; i < num_words; i++)
303 fields[0].in_value = &field0[i];
305 jtag_add_pathmove(3, path);
307 fields[1].in_value = (uint8_t *)(field1 + i);
309 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
/* fix up endianness of each captured word once the queue runs */
311 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
316 if ((retval = jtag_execute_queue()) != ERROR_OK)
318 LOG_ERROR("JTAG error while receiving data from debug handler");
322 /* examine results */
323 for (i = words_done; i < num_words; i++)
/* NOTE(review): 'field0[0]' inside a loop over i looks wrong --
 * presumably should test field0[i]; confirm against upstream. */
325 if (!(field0[0] & 1))
327 /* move backwards if necessary */
329 for (j = i; j < num_words - 1; j++)
331 field0[j] = field0[j + 1];
332 field1[j] = field1[j + 1];
337 if (words_scheduled == 0)
339 if (attempts++==1000)
341 LOG_ERROR("Failed to receiving data from debug handler after 1000 attempts");
342 retval = ERROR_TARGET_TIMEOUT;
347 words_done += words_scheduled;
/* copy the collected words out to the caller's buffer */
350 for (i = 0; i < num_words; i++)
351 *(buffer++) = buf_get_u32((uint8_t*)&field1[i], 0, 32);
/* Poll the DBGTX register for up to one second.  When TX_READY (bit 0
 * of the 3-bit status field) is set, the 32-bit TX value is captured
 * into the register cache.  'consume' selects whether the read clears
 * TX_READY (Capture-DR -> Shift-DR) or peeks without clearing.
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no data was ready.
 * NOTE(review): declarations of 'path', 'retval' and 'tmp', and the
 * polling loop construct, are not visible -- lines appear elided. */
358 static int xscale_read_tx(struct target *target, int consume)
360 struct xscale_common *xscale = target_to_xscale(target);
362 tap_state_t noconsume_path[6];
364 struct timeval timeout, now;
365 struct scan_field fields[3];
366 uint8_t field0_in = 0x0;
367 uint8_t field0_check_value = 0x2;
368 uint8_t field0_check_mask = 0x6;
369 uint8_t field2_check_value = 0x0;
370 uint8_t field2_check_mask = 0x1;
372 jtag_set_end_state(TAP_IDLE);
374 xscale_jtag_set_instr(target->tap,
375 XSCALE_DBGTX << xscale->xscale_variant,
/* consuming path: straight into Shift-DR */
378 path[0] = TAP_DRSELECT;
379 path[1] = TAP_DRCAPTURE;
380 path[2] = TAP_DRSHIFT;
/* non-consuming path: detour through Exit1/Pause/Exit2 */
382 noconsume_path[0] = TAP_DRSELECT;
383 noconsume_path[1] = TAP_DRCAPTURE;
384 noconsume_path[2] = TAP_DREXIT1;
385 noconsume_path[3] = TAP_DRPAUSE;
386 noconsume_path[4] = TAP_DREXIT2;
387 noconsume_path[5] = TAP_DRSHIFT;
389 memset(&fields, 0, sizeof fields);
391 fields[0].num_bits = 3;
392 fields[0].in_value = &field0_in;
394 fields[1].num_bits = 32;
395 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
397 fields[2].num_bits = 1;
399 fields[2].in_value = &tmp;
/* one-second deadline for the polling loop below */
401 gettimeofday(&timeout, NULL);
402 timeval_add_time(&timeout, 1, 0);
406 /* if we want to consume the register content (i.e. clear TX_READY),
407 * we have to go straight from Capture-DR to Shift-DR
408 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
411 jtag_add_pathmove(3, path);
414 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
417 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
419 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
420 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
422 if ((retval = jtag_execute_queue()) != ERROR_OK)
424 LOG_ERROR("JTAG error while reading TX");
425 return ERROR_TARGET_TIMEOUT;
428 gettimeofday(&now, NULL);
429 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
431 LOG_ERROR("time out reading TX register");
432 return ERROR_TARGET_TIMEOUT;
/* keep polling while data is not ready and the caller wants it consumed */
434 if (!((!(field0_in & 1)) && consume))
438 if (debug_level >= 3)
440 LOG_DEBUG("waiting 100ms");
441 alive_sleep(100); /* avoid flooding the logs */
449 if (!(field0_in & 1))
450 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
/* Push the cached RX register value to the target through the DBGRX
 * JTAG register.  First polls (up to one second) until the handler has
 * consumed the previous word (status bit 0 set), then performs the
 * final scan with the RX_VALID flush bit (field2) set.
 * NOTE(review): declarations of 'retval' and 'tmp', the polling loop
 * construct, the 'field2 = 1' flush-bit store, and the final return
 * are not visible -- lines appear elided. */
455 static int xscale_write_rx(struct target *target)
457 struct xscale_common *xscale = target_to_xscale(target);
459 struct timeval timeout, now;
460 struct scan_field fields[3];
461 uint8_t field0_out = 0x0;
462 uint8_t field0_in = 0x0;
463 uint8_t field0_check_value = 0x2;
464 uint8_t field0_check_mask = 0x6;
465 uint8_t field2 = 0x0;
466 uint8_t field2_check_value = 0x0;
467 uint8_t field2_check_mask = 0x1;
469 jtag_set_end_state(TAP_IDLE);
471 xscale_jtag_set_instr(target->tap,
472 XSCALE_DBGRX << xscale->xscale_variant,
475 memset(&fields, 0, sizeof fields);
477 fields[0].num_bits = 3;
478 fields[0].out_value = &field0_out;
479 fields[0].in_value = &field0_in;
481 fields[1].num_bits = 32;
482 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
484 fields[2].num_bits = 1;
485 fields[2].out_value = &field2;
487 fields[2].in_value = &tmp;
/* one-second deadline for the polling loop below */
489 gettimeofday(&timeout, NULL);
490 timeval_add_time(&timeout, 1, 0);
492 /* poll until rx_read is low */
493 LOG_DEBUG("polling RX");
496 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
498 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
499 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
501 if ((retval = jtag_execute_queue()) != ERROR_OK)
503 LOG_ERROR("JTAG error while writing RX");
507 gettimeofday(&now, NULL);
508 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
510 LOG_ERROR("time out writing RX register");
511 return ERROR_TARGET_TIMEOUT;
513 if (!(field0_in & 1))
515 if (debug_level >= 3)
517 LOG_DEBUG("waiting 100ms");
518 alive_sleep(100); /* avoid flooding the logs */
/* set rx_valid and perform the actual write scan */
528 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
530 if ((retval = jtag_execute_queue()) != ERROR_OK)
532 LOG_ERROR("JTAG error while writing RX");
539 /* send count elements of size byte to the debug handler */
/* Fast bulk-download path: streams 'count' elements of 'size' bytes
 * (4, 2 or 1) through DBGRX using queued raw DR writes, converting
 * each element from target endianness to host order first.
 * NOTE(review): declarations of 'retval', 'done_count' and the 't'
 * scratch array, the size dispatch (switch/case), the size==1 case,
 * the buffer advance, and the final return are not visible in this
 * listing -- lines appear elided. */
540 static int xscale_send(struct target *target, uint8_t *buffer, int count, int size)
542 struct xscale_common *xscale = target_to_xscale(target);
548 jtag_set_end_state(TAP_IDLE);
550 xscale_jtag_set_instr(target->tap,
551 XSCALE_DBGRX << xscale->xscale_variant,
559 int endianness = target->endianness;
560 while (done_count++ < count)
/* 32-bit elements */
565 if (endianness == TARGET_LITTLE_ENDIAN)
567 t[1]=le_to_h_u32(buffer);
570 t[1]=be_to_h_u32(buffer);
/* 16-bit elements */
574 if (endianness == TARGET_LITTLE_ENDIAN)
576 t[1]=le_to_h_u16(buffer);
579 t[1]=be_to_h_u16(buffer);
586 LOG_ERROR("BUG: size neither 4, 2 nor 1");
587 return ERROR_INVALID_ARGUMENTS;
589 jtag_add_dr_out(target->tap,
597 if ((retval = jtag_execute_queue()) != ERROR_OK)
599 LOG_ERROR("JTAG error while sending data to debug handler");
606 static int xscale_send_u32(struct target *target, uint32_t value)
608 struct xscale_common *xscale = target_to_xscale(target);
610 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
611 return xscale_write_rx(target);
/* Write the cached DCSR value to the target via SELDCSR, optionally
 * updating the hold_rst (-1 = keep current) and external_debug_break
 * request bits that ride in the 3-bit side field.
 * NOTE(review): declarations of 'retval', 'tmp' and 'tmp2', the
 * 'if (hold_rst != -1)' guard, and the final return are not visible
 * -- lines appear elided. */
614 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
616 struct xscale_common *xscale = target_to_xscale(target);
618 struct scan_field fields[3];
619 uint8_t field0 = 0x0;
620 uint8_t field0_check_value = 0x2;
621 uint8_t field0_check_mask = 0x7;
622 uint8_t field2 = 0x0;
623 uint8_t field2_check_value = 0x0;
624 uint8_t field2_check_mask = 0x1;
627 xscale->hold_rst = hold_rst;
629 if (ext_dbg_brk != -1)
630 xscale->external_debug_break = ext_dbg_brk;
632 jtag_set_end_state(TAP_IDLE);
633 xscale_jtag_set_instr(target->tap,
634 XSCALE_SELDCSR << xscale->xscale_variant,
637 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
638 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
640 memset(&fields, 0, sizeof fields);
642 fields[0].num_bits = 3;
643 fields[0].out_value = &field0;
645 fields[0].in_value = &tmp;
647 fields[1].num_bits = 32;
648 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
650 fields[2].num_bits = 1;
651 fields[2].out_value = &field2;
653 fields[2].in_value = &tmp2;
655 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
657 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
658 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
660 if ((retval = jtag_execute_queue()) != ERROR_OK)
662 LOG_ERROR("JTAG error while writing DCSR");
/* cache now matches hardware */
666 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
667 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
/* Parity of the number of set bits in a 32-bit word: 0 if even, 1 if odd. */
static unsigned int parity(unsigned int v)
{
	// unsigned int ov = v;
	/* Fold all 32 bits down to 4 bits that preserve the parity.
	 * The listing was missing these folds, leaving an undefined
	 * (and wrong) shift of 0x6996 by the raw value. */
	v ^= v >> 16;
	v ^= v >> 8;
	v ^= v >> 4;
	v &= 0xf;
	// LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
	/* 0x6996 is a 16-entry bit table: bit n = parity of nibble n. */
	return (0x6996 >> v) & 1;
}
/* Load one 32-byte line (8 instructions) into the mini-ICache at
 * virtual address 'va' using the LDIC JTAG instruction: first a 6-bit
 * command + 27-bit line address scan, then eight 32-bit word scans
 * each followed by a parity bit.
 * NOTE(review): declarations of 'cmd', 'packet', 'word' and 'value',
 * and the line computing the parity bit from 'value' into 'cmd', are
 * not visible -- lines appear elided. */
684 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
686 struct xscale_common *xscale = target_to_xscale(target);
690 struct scan_field fields[2];
692 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
695 jtag_set_end_state(TAP_IDLE);
696 xscale_jtag_set_instr(target->tap,
697 XSCALE_LDIC << xscale->xscale_variant,
700 /* CMD is b011 to load a cacheline into the Mini ICache.
701 * Loading into the main ICache is deprecated, and unused.
702 * It's followed by three zero bits, and 27 address bits.
704 buf_set_u32(&cmd, 0, 6, 0x3);
706 /* virtual address of desired cache line */
707 buf_set_u32(packet, 0, 27, va >> 5);
709 memset(&fields, 0, sizeof fields);
711 fields[0].num_bits = 6;
712 fields[0].out_value = &cmd;
714 fields[1].num_bits = 27;
715 fields[1].out_value = packet;
717 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
719 /* rest of packet is a cacheline: 8 instructions, with parity */
/* reuse the two fields: 32 data bits + 1 parity bit per word */
720 fields[0].num_bits = 32;
721 fields[0].out_value = packet;
723 fields[1].num_bits = 1;
724 fields[1].out_value = &cmd;
726 for (word = 0; word < 8; word++)
728 buf_set_u32(packet, 0, 32, buffer[word]);
/* host-order copy of the word, presumably fed to parity() -- the
 * computation line is not visible in this listing */
731 memcpy(&value, packet, sizeof(uint32_t));
734 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
737 return jtag_execute_queue();
/* Invalidate the I-cache line containing virtual address 'va' using
 * the LDIC JTAG instruction with command b000.
 * NOTE(review): declarations of 'cmd' and 'packet', and the final
 * return, are not visible -- lines appear elided. */
740 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
742 struct xscale_common *xscale = target_to_xscale(target);
745 struct scan_field fields[2];
747 jtag_set_end_state(TAP_IDLE);
748 xscale_jtag_set_instr(target->tap,
749 XSCALE_LDIC << xscale->xscale_variant,
752 /* CMD for invalidate IC line b000, bits [6:4] b000 */
753 buf_set_u32(&cmd, 0, 6, 0x0);
755 /* virtual address of desired cache line */
756 buf_set_u32(packet, 0, 27, va >> 5);
758 memset(&fields, 0, sizeof fields);
760 fields[0].num_bits = 6;
761 fields[0].out_value = &cmd;
763 fields[1].num_bits = 27;
764 fields[1].out_value = packet;
766 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
/* Rebuild the low (0x0) and high (0xffff0000) exception vector tables:
 * entries 1..7 come from user-set static vectors or are read back from
 * target memory (falling back to a self-branch on read failure), and
 * entry 0 (reset) is pointed at the debug handler.  The tables are then
 * loaded into the mini-ICache over the real vectors.
 * NOTE(review): declarations of 'i' and 'retval', the timeout early
 * return, and the final return are not visible -- lines appear elided. */
771 static int xscale_update_vectors(struct target *target)
773 struct xscale_common *xscale = target_to_xscale(target);
777 uint32_t low_reset_branch, high_reset_branch;
779 for (i = 1; i < 8; i++)
781 /* if there's a static vector specified for this exception, override */
782 if (xscale->static_high_vectors_set & (1 << i))
784 xscale->high_vectors[i] = xscale->static_high_vectors[i];
788 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
789 if (retval == ERROR_TARGET_TIMEOUT)
791 if (retval != ERROR_OK)
793 /* Some of these reads will fail as part of normal execution */
794 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
/* same procedure for the low vector table at address 0 */
799 for (i = 1; i < 8; i++)
801 if (xscale->static_low_vectors_set & (1 << i))
803 xscale->low_vectors[i] = xscale->static_low_vectors[i];
807 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
808 if (retval == ERROR_TARGET_TIMEOUT)
810 if (retval != ERROR_OK)
812 /* Some of these reads will fail as part of normal execution */
813 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
818 /* calculate branches to debug handler */
/* branch offset = handler entry (+0x20) - vector address - 8 (pipeline), in words */
819 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
820 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
822 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
823 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
825 /* invalidate and load exception vectors in mini i-cache */
826 xscale_invalidate_ic_line(target, 0x0);
827 xscale_invalidate_ic_line(target, 0xffff0000);
829 xscale_load_ic(target, 0x0, xscale->low_vectors);
830 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
/* Report architecture state to the user: generic ARM state plus
 * MMU/cache enables and the XScale-specific debug-entry reason.
 * NOTE(review): the closing return is not visible -- lines appear
 * elided. */
835 static int xscale_arch_state(struct target *target)
837 struct xscale_common *xscale = target_to_xscale(target);
838 struct arm *armv4_5 = &xscale->armv4_5_common;
840 static const char *state[] =
842 "disabled", "enabled"
/* indexed by enum xscale_debug_reason */
845 static const char *arch_dbg_reason[] =
847 "", "\n(processor reset)", "\n(trace buffer full)"
850 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
852 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
853 return ERROR_INVALID_ARGUMENTS;
856 arm_arch_state(target);
857 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
858 state[xscale->armv4_5_mmu.mmu_enabled],
859 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
860 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
861 arch_dbg_reason[xscale->arch_debug_reason]);
/* Periodic poll: a non-consuming TX read succeeding means the target
 * entered debug state, so mark it halted, run debug entry, and fire
 * HALTED / DEBUG_HALTED event callbacks as appropriate.
 * NOTE(review): the final return is not visible -- lines appear
 * elided. */
866 static int xscale_poll(struct target *target)
868 int retval = ERROR_OK;
870 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
872 enum target_state previous_state = target->state;
873 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
876 /* there's data to read from the tx register, we entered debug state */
877 target->state = TARGET_HALTED;
879 /* process debug entry, fetching current mode regs */
880 retval = xscale_debug_entry(target);
882 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
884 LOG_USER("error while polling TX register, reset CPU");
885 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
886 target->state = TARGET_HALTED;
889 /* debug_entry could have overwritten target state (i.e. immediate resume)
890 * don't signal event handlers in that case
892 if (target->state != TARGET_HALTED)
895 /* if target was running, signal that we halted
896 * otherwise we reentered from debug execution */
897 if (previous_state == TARGET_RUNNING)
898 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
900 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
/* Handle entry into debug state: drain the register dump the debug
 * handler sends over TX (r0, pc, r1-r7, cpsr, then banked regs),
 * populate the register cache, decode the DCSR "method of entry" to
 * set the debug reason, apply the PC fixup, refresh MMU/cache status,
 * and collect trace data if tracing is active.
 * NOTE(review): declarations of 'retval', 'buffer', 'i', 'moe' and
 * 'pc', the PC-fixup switch body, and the 'switch (moe)' header are
 * not visible -- lines appear elided. */
906 static int xscale_debug_entry(struct target *target)
908 struct xscale_common *xscale = target_to_xscale(target);
909 struct arm *armv4_5 = &xscale->armv4_5_common;
916 /* clear external dbg break (will be written on next DCSR read) */
917 xscale->external_debug_break = 0;
918 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
921 /* get r0, pc, r1 to r7 and cpsr */
922 if ((retval = xscale_receive(target, buffer, 10)) != ERROR_OK)
925 /* move r0 from buffer to register cache */
926 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
927 armv4_5->core_cache->reg_list[0].dirty = 1;
928 armv4_5->core_cache->reg_list[0].valid = 1;
929 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
931 /* move pc from buffer to register cache */
932 buf_set_u32(armv4_5->pc->value, 0, 32, buffer[1]);
933 armv4_5->pc->dirty = 1;
934 armv4_5->pc->valid = 1;
935 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
937 /* move data from buffer to register cache */
938 for (i = 1; i <= 7; i++)
940 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
941 armv4_5->core_cache->reg_list[i].dirty = 1;
942 armv4_5->core_cache->reg_list[i].valid = 1;
943 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
946 arm_set_cpsr(armv4_5, buffer[9]);
947 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
949 if (!is_arm_mode(armv4_5->core_mode))
951 target->state = TARGET_UNKNOWN;
952 LOG_ERROR("cpsr contains invalid mode value - communication failure");
953 return ERROR_TARGET_FAILURE;
955 LOG_DEBUG("target entered debug state in %s mode",
956 arm_mode_name(armv4_5->core_mode));
958 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
960 xscale_receive(target, buffer, 8);
961 buf_set_u32(armv4_5->spsr->value, 0, 32, buffer[7]);
962 armv4_5->spsr->dirty = false;
963 armv4_5->spsr->valid = true;
967 /* r8 to r14, but no spsr */
968 xscale_receive(target, buffer, 7);
971 /* move data from buffer to right banked register in cache */
972 for (i = 8; i <= 14; i++)
974 struct reg *r = arm_reg_current(armv4_5, i);
976 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
981 /* examine debug reason */
982 xscale_read_dcsr(target);
/* MOE = DCSR bits [4:2], the "method of entry" field */
983 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
985 /* stored PC (for calculating fixup) */
986 pc = buf_get_u32(armv4_5->pc->value, 0, 32);
990 case 0x0: /* Processor reset */
991 target->debug_reason = DBG_REASON_DBGRQ;
992 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
995 case 0x1: /* Instruction breakpoint hit */
996 target->debug_reason = DBG_REASON_BREAKPOINT;
997 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1000 case 0x2: /* Data breakpoint hit */
1001 target->debug_reason = DBG_REASON_WATCHPOINT;
1002 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1005 case 0x3: /* BKPT instruction executed */
1006 target->debug_reason = DBG_REASON_BREAKPOINT;
1007 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1010 case 0x4: /* Ext. debug event */
1011 target->debug_reason = DBG_REASON_DBGRQ;
1012 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1015 case 0x5: /* Vector trap occured */
1016 target->debug_reason = DBG_REASON_BREAKPOINT;
1017 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
1020 case 0x6: /* Trace buffer full break */
1021 target->debug_reason = DBG_REASON_DBGRQ;
1022 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
1025 case 0x7: /* Reserved (may flag Hot-Debug support) */
1027 LOG_ERROR("Method of Entry is 'Reserved'");
1032 /* apply PC fixup */
1033 buf_set_u32(armv4_5->pc->value, 0, 32, pc);
1035 /* on the first debug entry, identify cache type */
1036 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
1038 uint32_t cache_type_reg;
1040 /* read cp15 cache type register */
1041 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
1042 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
1044 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
1047 /* examine MMU and Cache settings */
1048 /* read cp15 control register */
1049 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1050 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
/* CP15 control: bit 0 = MMU, bit 2 = D-cache, bit 12 = I-cache */
1051 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
1052 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1053 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1055 /* tracing enabled, read collected trace data */
1056 if (xscale->trace.buffer_enabled)
1058 xscale_read_trace(target);
1059 xscale->trace.buffer_fill--;
1061 /* resume if we're still collecting trace data */
1062 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
1063 && (xscale->trace.buffer_fill > 0))
1065 xscale_resume(target, 1, 0x0, 1, 0);
1069 xscale->trace.buffer_enabled = 0;
/* Request a halt: reject invalid states, then assert the external
 * debug break via a DCSR read/write cycle; the actual transition to
 * TARGET_HALTED is observed later by xscale_poll().
 * NOTE(review): the else-branch structure and final return are not
 * visible -- lines appear elided. */
1076 static int xscale_halt(struct target *target)
1078 struct xscale_common *xscale = target_to_xscale(target);
1080 LOG_DEBUG("target->state: %s",
1081 target_state_name(target));
1083 if (target->state == TARGET_HALTED)
1085 LOG_DEBUG("target was already halted");
1088 else if (target->state == TARGET_UNKNOWN)
1090 /* this must not happen for a xscale target */
1091 LOG_ERROR("target was in unknown state when halt was requested");
1092 return ERROR_TARGET_INVALID;
1094 else if (target->state == TARGET_RESET)
1096 LOG_DEBUG("target->state == TARGET_RESET");
1100 /* assert external dbg break */
1101 xscale->external_debug_break = 1;
1102 xscale_read_dcsr(target);
1104 target->debug_reason = DBG_REASON_DBGRQ;
/* Arm hardware single-step by programming IBCR0 with the address of
 * the next instruction (bit 0 = enable).  If IBCR0 currently backs a
 * user breakpoint, that breakpoint is unset first so the register is
 * free.
 * NOTE(review): the declaration of 'retval', the if/else structure
 * around the ibcr0_bp check, the error-path return after LOG_ERROR,
 * and the final return are not visible -- lines appear elided. */
1110 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1112 struct xscale_common *xscale = target_to_xscale(target);
1113 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1116 if (xscale->ibcr0_used)
1118 struct breakpoint *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1122 xscale_unset_breakpoint(target, ibcr0_bp);
1126 LOG_ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1131 if ((retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1)) != ERROR_OK)
1137 static int xscale_disable_single_step(struct target *target)
1139 struct xscale_common *xscale = target_to_xscale(target);
1140 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1143 if ((retval = xscale_set_reg_u32(ibcr0, 0x0)) != ERROR_OK)
1149 static void xscale_enable_watchpoints(struct target *target)
1151 struct watchpoint *watchpoint = target->watchpoints;
1155 if (watchpoint->set == 0)
1156 xscale_set_watchpoint(target, watchpoint);
1157 watchpoint = watchpoint->next;
1161 static void xscale_enable_breakpoints(struct target *target)
1163 struct breakpoint *breakpoint = target->breakpoints;
1165 /* set any pending breakpoints */
1168 if (breakpoint->set == 0)
1169 xscale_set_breakpoint(target, breakpoint);
1170 breakpoint = breakpoint->next;
/* Resume execution: refresh the exception-vector mini-ICache, handle a
 * breakpoint at the resume PC by single-stepping over it, restore
 * banked registers, then feed the debug handler a resume command
 * (0x30, or 0x31 after a 0x62 trace-buffer clean) followed by cpsr,
 * r7..r0 and the PC.
 * NOTE(review): declarations of 'retval', 'i' and 'next_pc', and
 * several brace/else lines, are not visible -- lines appear elided. */
1174 static int xscale_resume(struct target *target, int current,
1175 uint32_t address, int handle_breakpoints, int debug_execution)
1177 struct xscale_common *xscale = target_to_xscale(target);
1178 struct arm *armv4_5 = &xscale->armv4_5_common;
1179 struct breakpoint *breakpoint = target->breakpoints;
1180 uint32_t current_pc;
1186 if (target->state != TARGET_HALTED)
1188 LOG_WARNING("target not halted");
1189 return ERROR_TARGET_NOT_HALTED;
1192 if (!debug_execution)
1194 target_free_all_working_areas(target);
1197 /* update vector tables */
1198 if ((retval = xscale_update_vectors(target)) != ERROR_OK)
1201 /* current = 1: continue on current pc, otherwise continue at <address> */
1203 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1205 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1207 /* if we're at the reset vector, we have to simulate the branch */
1208 if (current_pc == 0x0)
1210 arm_simulate_step(target, NULL);
1211 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1214 /* the front-end may request us not to handle breakpoints */
1215 if (handle_breakpoints)
1217 breakpoint = breakpoint_find(target,
1218 buf_get_u32(armv4_5->pc->value, 0, 32));
1219 if (breakpoint != NULL)
1223 /* there's a breakpoint at the current PC, we have to step over it */
1224 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1225 xscale_unset_breakpoint(target, breakpoint);
1227 /* calculate PC of next instruction */
1228 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1230 uint32_t current_opcode;
/* NOTE(review): "¤t_opcode" is mojibake for "&current_opcode";
 * restore the '&' when re-encoding this file. */
1231 target_read_u32(target, current_pc, ¤t_opcode);
1232 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1235 LOG_DEBUG("enable single-step");
1236 xscale_enable_single_step(target, next_pc);
1238 /* restore banked registers */
1239 retval = xscale_restore_banked(target);
1241 /* send resume request (command 0x30 or 0x31)
1242 * clean the trace buffer if it is to be enabled (0x62) */
1243 if (xscale->trace.buffer_enabled)
1245 xscale_send_u32(target, 0x62);
1246 xscale_send_u32(target, 0x31);
1249 xscale_send_u32(target, 0x30);
/* hand the handler the context to restore: cpsr, r7..r0, then pc */
1252 xscale_send_u32(target,
1253 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1254 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1255 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1257 for (i = 7; i >= 0; i--)
1260 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1261 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1265 xscale_send_u32(target,
1266 buf_get_u32(armv4_5->pc->value, 0, 32));
1267 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1268 buf_get_u32(armv4_5->pc->value, 0, 32));
1270 /* wait for and process debug entry */
1271 xscale_debug_entry(target);
1273 LOG_DEBUG("disable single-step");
1274 xscale_disable_single_step(target);
/* re-arm the breakpoint we stepped over */
1276 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1277 xscale_set_breakpoint(target, breakpoint);
1281 /* enable any pending breakpoints and watchpoints */
1282 xscale_enable_breakpoints(target);
1283 xscale_enable_watchpoints(target);
1285 /* restore banked registers */
1286 retval = xscale_restore_banked(target);
1288 /* send resume request (command 0x30 or 0x31)
1289 * clean the trace buffer if it is to be enabled (0x62) */
1290 if (xscale->trace.buffer_enabled)
1292 xscale_send_u32(target, 0x62);
1293 xscale_send_u32(target, 0x31);
1296 xscale_send_u32(target, 0x30);
1299 xscale_send_u32(target, buf_get_u32(armv4_5->cpsr->value, 0, 32));
1300 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1301 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1303 for (i = 7; i >= 0; i--)
1306 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1307 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1311 xscale_send_u32(target, buf_get_u32(armv4_5->pc->value, 0, 32));
1312 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1313 buf_get_u32(armv4_5->pc->value, 0, 32));
1315 target->debug_reason = DBG_REASON_NOTHALTED;
1317 if (!debug_execution)
1319 /* registers are now invalid */
1320 register_cache_invalidate(armv4_5->core_cache);
1321 target->state = TARGET_RUNNING;
1322 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1326 target->state = TARGET_DEBUG_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1330 LOG_DEBUG("target resumed");
1335 static int xscale_step_inner(struct target *target, int current,
1336 uint32_t address, int handle_breakpoints)
1338 struct xscale_common *xscale = target_to_xscale(target);
1339 struct arm *armv4_5 = &xscale->armv4_5_common;
1344 target->debug_reason = DBG_REASON_SINGLESTEP;
1346 /* calculate PC of next instruction */
1347 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
1349 uint32_t current_opcode, current_pc;
1350 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1352 target_read_u32(target, current_pc, ¤t_opcode);
1353 LOG_ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "", current_opcode);
1357 LOG_DEBUG("enable single-step");
1358 if ((retval = xscale_enable_single_step(target, next_pc)) != ERROR_OK)
1361 /* restore banked registers */
1362 if ((retval = xscale_restore_banked(target)) != ERROR_OK)
1365 /* send resume request (command 0x30 or 0x31)
1366 * clean the trace buffer if it is to be enabled (0x62) */
1367 if (xscale->trace.buffer_enabled)
1369 if ((retval = xscale_send_u32(target, 0x62)) != ERROR_OK)
1371 if ((retval = xscale_send_u32(target, 0x31)) != ERROR_OK)
1375 if ((retval = xscale_send_u32(target, 0x30)) != ERROR_OK)
1379 retval = xscale_send_u32(target,
1380 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1381 if (retval != ERROR_OK)
1383 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1384 buf_get_u32(armv4_5->cpsr->value, 0, 32));
1386 for (i = 7; i >= 0; i--)
1389 if ((retval = xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32))) != ERROR_OK)
1391 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
1395 retval = xscale_send_u32(target,
1396 buf_get_u32(armv4_5->pc->value, 0, 32));
1397 if (retval != ERROR_OK)
1399 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1400 buf_get_u32(armv4_5->pc->value, 0, 32));
1402 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1404 /* registers are now invalid */
1405 register_cache_invalidate(armv4_5->core_cache);
1407 /* wait for and process debug entry */
1408 if ((retval = xscale_debug_entry(target)) != ERROR_OK)
1411 LOG_DEBUG("disable single-step");
1412 if ((retval = xscale_disable_single_step(target)) != ERROR_OK)
1415 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Single-step the target by one instruction.
 *
 * current==1 steps from the current PC; otherwise the PC is first set
 * to <address>.  A breakpoint sitting at the step location is
 * temporarily removed and re-armed afterwards.  At the reset vector
 * (PC == 0) the instruction is simulated on the host instead of being
 * executed on the core. */
1420 static int xscale_step(struct target *target, int current,
1421 uint32_t address, int handle_breakpoints)
1423 struct arm *armv4_5 = target_to_arm(target);
1424 struct breakpoint *breakpoint = NULL;
1426 uint32_t current_pc;
1429 if (target->state != TARGET_HALTED)
1431 LOG_WARNING("target not halted");
1432 return ERROR_TARGET_NOT_HALTED;
1435 /* current = 1: continue on current pc, otherwise continue at <address> */
1437 buf_set_u32(armv4_5->pc->value, 0, 32, address);
1439 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
/* the debug handler can't step through the reset vector, so simulate
 * the instruction on the host and report a synthetic single-step halt */
1441 /* if we're at the reset vector, we have to simulate the step */
1442 if (current_pc == 0x0)
1444 if ((retval = arm_simulate_step(target, NULL)) != ERROR_OK)
1446 current_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
1448 target->debug_reason = DBG_REASON_SINGLESTEP;
1449 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1454 /* the front-end may request us not to handle breakpoints */
1455 if (handle_breakpoints)
1456 breakpoint = breakpoint_find(target,
1457 buf_get_u32(armv4_5->pc->value, 0, 32));
/* temporarily remove the breakpoint we would otherwise step onto */
1458 if (breakpoint != NULL) {
1459 retval = xscale_unset_breakpoint(target, breakpoint);
1460 if (retval != ERROR_OK)
1464 retval = xscale_step_inner(target, current, address, handle_breakpoints);
/* re-arm the breakpoint removed above */
1468 xscale_set_breakpoint(target, breakpoint);
1471 LOG_DEBUG("target stepped");
/* Assert reset on the target: program DCSR so the core traps the reset
 * vector and stays held, then pulse SRST via the JTAG layer.  If
 * reset_halt is requested, a halt is also queued. */
1477 static int xscale_assert_reset(struct target *target)
1479 struct xscale_common *xscale = target_to_xscale(target);
1481 LOG_DEBUG("target->state: %s",
1482 target_state_name(target));
1484 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1485 * end up in T-L-R, which would reset JTAG
1487 jtag_set_end_state(TAP_IDLE);
1488 xscale_jtag_set_instr(target->tap,
1489 XSCALE_SELDCSR << xscale->xscale_variant,
1492 /* set Hold reset, Halt mode and Trap Reset */
1493 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1494 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1495 xscale_write_dcsr(target, 1, 0);
1497 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1498 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1499 jtag_execute_queue();
/* assert SRST (system reset) while leaving TRST alone */
1502 jtag_add_reset(0, 1);
1504 /* sleep 1ms, to be sure we fulfill any requirements */
1505 jtag_add_sleep(1000);
1506 jtag_execute_queue();
1508 target->state = TARGET_RESET;
1510 if (target->reset_halt)
1513 if ((retval = target_halt(target)) != ERROR_OK)
/* Deassert reset and re-establish debug control.
 *
 * Invalidates all host-side breakpoint/watchpoint bookkeeping and the
 * register cache, releases SRST, then reloads the on-chip debug handler
 * into the mini-icache (one 32-byte cache line at a time) and lets the
 * core run into it.  If reset_halt was not requested, the target is
 * resumed after the debug entry completes. */
1520 static int xscale_deassert_reset(struct target *target)
1522 struct xscale_common *xscale = target_to_xscale(target);
1523 struct breakpoint *breakpoint = target->breakpoints;
/* hardware comparators are reset by SRST: forget all prior usage */
1527 xscale->ibcr_available = 2;
1528 xscale->ibcr0_used = 0;
1529 xscale->ibcr1_used = 0;
1531 xscale->dbr_available = 2;
1532 xscale->dbr0_used = 0;
1533 xscale->dbr1_used = 0;
1535 /* mark all hardware breakpoints as unset */
1538 if (breakpoint->type == BKPT_HARD)
1540 breakpoint->set = 0;
1542 breakpoint = breakpoint->next;
1545 register_cache_invalidate(xscale->armv4_5_common.core_cache);
1547 /* FIXME mark hardware watchpoints got unset too. Also,
1548 * at least some of the XScale registers are invalid...
1552 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1553 * contents got invalidated. Safer to force that, so writing new
1554 * contents can't ever fail..
1559 const uint8_t *buffer = xscale_debug_handler;
/* release SRST (both reset lines deasserted) */
1563 jtag_add_reset(0, 0);
1565 /* wait 300ms; 150 and 100ms were not enough */
1566 jtag_add_sleep(300*1000);
1568 jtag_add_runtest(2030, TAP_IDLE);
1569 jtag_execute_queue();
1571 /* set Hold reset, Halt mode and Trap Reset */
1572 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1574 xscale_write_dcsr(target, 1, 0);
1576 /* Load the debug handler into the mini-icache. Since
1577 * it's using halt mode (not monitor mode), it runs in
1578 * "Special Debug State" for access to registers, memory,
1579 * coprocessors, trace data, etc.
1581 address = xscale->handler_address;
1582 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1584 binary_size -= buf_cnt, buffer += buf_cnt)
1586 uint32_t cache_line[8];
1589 buf_cnt = binary_size;
1593 for (i = 0; i < buf_cnt; i += 4)
1595 /* convert LE buffer to host-endian uint32_t */
1596 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
/* pad the partial cache line with ARM "mov r8, r8" no-ops */
1599 for (; i < 32; i += 4)
1601 cache_line[i / 4] = 0xe1a08008;
1604 /* only load addresses other than the reset vectors */
1605 if ((address % 0x400) != 0x0)
1607 retval = xscale_load_ic(target, address,
1609 if (retval != ERROR_OK)
/* load the vector tables (low and high) separately */
1616 retval = xscale_load_ic(target, 0x0,
1617 xscale->low_vectors);
1618 if (retval != ERROR_OK)
1620 retval = xscale_load_ic(target, 0xffff0000,
1621 xscale->high_vectors);
1622 if (retval != ERROR_OK)
1625 jtag_add_runtest(30, TAP_IDLE);
1627 jtag_add_sleep(100000);
1629 /* set Hold reset, Halt mode and Trap Reset */
1630 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1631 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1632 xscale_write_dcsr(target, 1, 0);
1634 /* clear Hold reset to let the target run (should enter debug handler) */
1635 xscale_write_dcsr(target, 0, 1);
1636 target->state = TARGET_RUNNING;
1638 if (!target->reset_halt)
1640 jtag_add_sleep(10000);
1642 /* we should have entered debug now */
1643 xscale_debug_entry(target);
1644 target->state = TARGET_HALTED;
1646 /* resume the target */
1647 xscale_resume(target, 1, 0x0, 1, 0);
/* Read a single core register via the debug handler.
 * Stub: logs an error; no read is performed. */
1654 static int xscale_read_core_reg(struct target *target, struct reg *r,
1655 int num, enum arm_mode mode)
1657 /** \todo add debug handler support for core register reads */
1658 LOG_ERROR("not implemented");
/* Write a single core register via the debug handler.
 * Stub: logs an error; no write is performed. */
1662 static int xscale_write_core_reg(struct target *target, struct reg *r,
1663 int num, enum arm_mode mode, uint32_t value)
1665 /** \todo add debug handler support for core register writes */
1666 LOG_ERROR("not implemented");
/* Fetch all banked registers (r8-r14 plus SPSR per mode) from the
 * debug handler into the register cache.
 *
 * Iterates over FIQ, IRQ, SVC, ABT, UND and SYS; USR is skipped because
 * it shares registers with SYS and can't be entered on XScale.  Modes
 * whose cached registers are all valid are skipped.
 * NOTE(review): the heap buffer allocated below is presumably freed in
 * extraction-dropped lines at the end of the function — confirm. */
1670 static int xscale_full_context(struct target *target)
1672 struct arm *armv4_5 = target_to_arm(target);
1680 if (target->state != TARGET_HALTED)
1682 LOG_WARNING("target not halted");
1683 return ERROR_TARGET_NOT_HALTED;
/* scratch space for up to 8 received 32-bit words (r8..r14 + SPSR) */
1686 buffer = malloc(4 * 8);
1688 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1689 * we can't enter User mode on an XScale (unpredictable),
1690 * but User shares registers with SYS
1692 for (i = 1; i < 7; i++)
1694 enum arm_mode mode = armv4_5_number_to_mode(i);
1698 if (mode == ARM_MODE_USR)
1701 /* check if there are invalid registers in the current mode
1703 for (j = 0; valid && j <= 16; j++)
1705 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1712 /* request banked registers */
1713 xscale_send_u32(target, 0x0)
1715 /* send CPSR for desired bank mode */
1716 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1718 /* get banked registers: r8 to r14; and SPSR
1719 * except in USR/SYS mode
1721 if (mode != ARM_MODE_SYS) {
1723 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
/* 8 words: r8..r14 plus SPSR (received last, at index 7) */
1726 xscale_receive(target, buffer, 8);
1728 buf_set_u32(r->value, 0, 32, buffer[7]);
/* SYS mode has no SPSR: only 7 words (r8..r14) */
1732 xscale_receive(target, buffer, 7);
1735 /* move data from buffer to register cache */
1736 for (j = 8; j <= 14; j++)
1738 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1741 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
/* Flush dirty banked registers (r8-r14 and, where applicable, SPSR)
 * from the register cache back to the target via the debug handler.
 *
 * Iterates over FIQ, IRQ, SVC, ABT, UND and SYS; a mode with no dirty
 * registers is skipped entirely.  Counterpart of xscale_full_context(). */
1752 static int xscale_restore_banked(struct target *target)
1754 struct arm *armv4_5 = target_to_arm(target);
1758 if (target->state != TARGET_HALTED)
1760 LOG_WARNING("target not halted");
1761 return ERROR_TARGET_NOT_HALTED;
1764 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1765 * and check if any banked registers need to be written. Ignore
1766 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1767 * an XScale (unpredictable), but they share all registers.
1769 for (i = 1; i < 7; i++)
1771 enum arm_mode mode = armv4_5_number_to_mode(i);
1774 if (mode == ARM_MODE_USR)
1777 /* check if there are dirty registers in this mode */
1778 for (j = 8; j <= 14; j++)
1780 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1785 /* if not USR/SYS, check if the SPSR needs to be written */
1786 if (mode != ARM_MODE_SYS)
1788 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1793 /* there's nothing to flush for this mode */
1797 /* command 0x1: "send banked registers" */
1798 xscale_send_u32(target, 0x1);
1800 /* send CPSR for desired mode */
1801 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1803 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1804 * but this protocol doesn't understand that nuance.
1806 for (j = 8; j <= 14; j++) {
1807 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1809 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1813 /* send spsr if not in USR/SYS mode */
1814 if (mode != ARM_MODE_SYS) {
1815 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1817 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
/* Read target memory through the debug handler.
 *
 * size is the access width (1, 2 or 4 bytes), count the number of
 * accesses.  The handler streams back one 32-bit word per element; the
 * relevant low bytes are repacked into the caller's byte buffer in
 * target endianness.  On a sticky data abort (DCSR.SA) the abort is
 * cleared (command 0x60) and ERROR_TARGET_DATA_ABORT is returned.
 * NOTE(review): buf32 is heap-allocated below; its free() is presumably
 * in extraction-dropped lines — confirm. */
1825 static int xscale_read_memory(struct target *target, uint32_t address,
1826 uint32_t size, uint32_t count, uint8_t *buffer)
1828 struct xscale_common *xscale = target_to_xscale(target);
1833 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1835 if (target->state != TARGET_HALTED)
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED;
1841 /* sanitize arguments */
1842 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1843 return ERROR_INVALID_ARGUMENTS;
1845 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1846 return ERROR_TARGET_UNALIGNED_ACCESS;
1848 /* send memory read request (command 0x1n, n: access size) */
1849 if ((retval = xscale_send_u32(target, 0x10 | size)) != ERROR_OK)
1852 /* send base address for read request */
1853 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1856 /* send number of requested data words */
1857 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1860 /* receive data from target (count times 32-bit words in host endianness) */
1861 buf32 = malloc(4 * count);
1862 if ((retval = xscale_receive(target, buf32, count)) != ERROR_OK)
1865 /* extract data from host-endian buffer into byte stream */
1866 for (i = 0; i < count; i++)
1871 target_buffer_set_u32(target, buffer, buf32[i]);
1875 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1879 *buffer++ = buf32[i] & 0xff;
/* unreachable given the earlier size check; defensive only */
1882 LOG_ERROR("invalid read size");
1883 return ERROR_INVALID_ARGUMENTS;
1889 /* examine DCSR, to see if Sticky Abort (SA) got set */
1890 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1892 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
/* clear SA bit (command 0x60) before reporting the abort */
1895 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1898 return ERROR_TARGET_DATA_ABORT;
/* Read memory using physical addressing.
 * Only supported while the MMU is disabled, in which case virtual and
 * physical addresses coincide and the normal read path is used;
 * otherwise this is an unimplemented stub that logs an error. */
1904 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1905 uint32_t size, uint32_t count, uint8_t *buffer)
1907 struct xscale_common *xscale = target_to_xscale(target);
1909 /* with MMU inactive, there are only physical addresses */
1910 if (!xscale->armv4_5_mmu.mmu_enabled)
1911 return xscale_read_memory(target, address, size, count, buffer);
1913 /** \todo: provide a non-stub implementation of this routine. */
1914 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1915 target_name(target), __func__);
/* Write target memory through the debug handler.
 *
 * size is the access width (1, 2 or 4 bytes), count the number of
 * accesses.  Data is streamed to the handler; on a sticky data abort
 * (DCSR.SA) the abort is cleared (command 0x60) and
 * ERROR_TARGET_DATA_ABORT is returned.
 * NOTE(review): both a per-word send loop and a bulk xscale_send() call
 * appear below; one of them is presumably disabled by preprocessor
 * lines lost in extraction — confirm against the full file. */
1919 static int xscale_write_memory(struct target *target, uint32_t address,
1920 uint32_t size, uint32_t count, uint8_t *buffer)
1922 struct xscale_common *xscale = target_to_xscale(target);
1925 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32, address, size, count);
1927 if (target->state != TARGET_HALTED)
1929 LOG_WARNING("target not halted");
1930 return ERROR_TARGET_NOT_HALTED;
1933 /* sanitize arguments */
1934 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1935 return ERROR_INVALID_ARGUMENTS;
1937 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1938 return ERROR_TARGET_UNALIGNED_ACCESS;
1940 /* send memory write request (command 0x2n, n: access size) */
1941 if ((retval = xscale_send_u32(target, 0x20 | size)) != ERROR_OK)
1944 /* send base address for write request */
1945 if ((retval = xscale_send_u32(target, address)) != ERROR_OK)
1948 /* send number of requested data words to be written*/
1949 if ((retval = xscale_send_u32(target, count)) != ERROR_OK)
1952 /* extract data from host-endian buffer into byte stream */
1954 for (i = 0; i < count; i++)
1959 value = target_buffer_get_u32(target, buffer);
1960 xscale_send_u32(target, value);
1964 value = target_buffer_get_u16(target, buffer);
1965 xscale_send_u32(target, value);
1970 xscale_send_u32(target, value);
/* unreachable given the earlier size check; defensive only */
1974 LOG_ERROR("should never get here");
1979 if ((retval = xscale_send(target, buffer, count, size)) != ERROR_OK)
1982 /* examine DCSR, to see if Sticky Abort (SA) got set */
1983 if ((retval = xscale_read_dcsr(target)) != ERROR_OK)
1985 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
/* clear SA bit (command 0x60) before reporting the abort */
1988 if ((retval = xscale_send_u32(target, 0x60)) != ERROR_OK)
1991 return ERROR_TARGET_DATA_ABORT;
1997 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1998 uint32_t size, uint32_t count, uint8_t *buffer)
2000 struct xscale_common *xscale = target_to_xscale(target);
2002 /* with MMU inactive, there are only physical addresses */
2003 if (!xscale->armv4_5_mmu.mmu_enabled)
2004 return xscale_read_memory(target, address, size, count, buffer);
2006 /** \todo: provide a non-stub implementation of this routine. */
2007 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
2008 target_name(target), __func__);
/* Bulk write: thin wrapper that forwards to xscale_write_memory()
 * using 32-bit (word) accesses. */
2012 static int xscale_bulk_write_memory(struct target *target, uint32_t address,
2013 uint32_t count, uint8_t *buffer)
2015 return xscale_write_memory(target, address, 4, count, buffer);
/* Return the translation table base: refresh the cached XSCALE_TTB
 * register from the target, then extract its 32-bit value. */
2018 static uint32_t xscale_get_ttb(struct target *target)
2020 struct xscale_common *xscale = target_to_xscale(target);
2023 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
2024 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
/* Disable the MMU and/or caches by clearing bits in the CP15 control
 * register.  The DCache is cleaned (0x50) and invalidated (0x51) before
 * being switched off; the ICache is invalidated (0x52).  A cpwait
 * (0x53) is issued last so outstanding CP15 operations complete. */
2029 static void xscale_disable_mmu_caches(struct target *target, int mmu,
2030 int d_u_cache, int i_cache)
2032 struct xscale_common *xscale = target_to_xscale(target);
2033 uint32_t cp15_control;
2035 /* read cp15 control register */
2036 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2037 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
/* bit 0: MMU enable */
2040 cp15_control &= ~0x1U;
/* clean DCache (command 0x50) using the dedicated clean address area */
2045 xscale_send_u32(target, 0x50);
2046 xscale_send_u32(target, xscale->cache_clean_address);
2048 /* invalidate DCache */
2049 xscale_send_u32(target, 0x51);
/* bit 2: DCache enable */
2051 cp15_control &= ~0x4U;
2056 /* invalidate ICache */
2057 xscale_send_u32(target, 0x52);
/* bit 12: ICache enable */
2058 cp15_control &= ~0x1000U;
2061 /* write new cp15 control register */
2062 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2064 /* execute cpwait to ensure outstanding operations complete */
2065 xscale_send_u32(target, 0x53);
/* Enable the MMU and/or caches by setting bits in the CP15 control
 * register, then issue cpwait (0x53) so the change takes effect.
 * Counterpart of xscale_disable_mmu_caches(). */
2068 static void xscale_enable_mmu_caches(struct target *target, int mmu,
2069 int d_u_cache, int i_cache)
2071 struct xscale_common *xscale = target_to_xscale(target);
2072 uint32_t cp15_control;
2074 /* read cp15 control register */
2075 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2076 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
/* bit 0: MMU, bit 2: DCache, bit 12: ICache */
2079 cp15_control |= 0x1U;
2082 cp15_control |= 0x4U;
2085 cp15_control |= 0x1000U;
2087 /* write new cp15 control register */
2088 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2090 /* execute cpwait to ensure outstanding operations complete */
2091 xscale_send_u32(target, 0x53);
/* Arm a breakpoint on the target.
 *
 * Hardware breakpoints claim one of the two IBCR comparators (the
 * address is written with bit 0 set to enable).  Software breakpoints
 * save the original instruction and overwrite it with the ARM or Thumb
 * BKPT opcode, chosen by breakpoint->length (4 = ARM, else Thumb).
 * breakpoint->set records which comparator was used (1 or 2). */
2094 static int xscale_set_breakpoint(struct target *target,
2095 struct breakpoint *breakpoint)
2098 struct xscale_common *xscale = target_to_xscale(target);
2100 if (target->state != TARGET_HALTED)
2102 LOG_WARNING("target not halted");
2103 return ERROR_TARGET_NOT_HALTED;
2106 if (breakpoint->set)
2108 LOG_WARNING("breakpoint already set");
2112 if (breakpoint->type == BKPT_HARD)
/* bit 0 of the IBCR value is the enable bit */
2114 uint32_t value = breakpoint->address | 1;
2115 if (!xscale->ibcr0_used)
2117 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2118 xscale->ibcr0_used = 1;
2119 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2121 else if (!xscale->ibcr1_used)
2123 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2124 xscale->ibcr1_used = 1;
2125 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
/* xscale_add_breakpoint() tracks availability, so this can't happen */
2129 LOG_ERROR("BUG: no hardware comparator available");
2133 else if (breakpoint->type == BKPT_SOFT)
2135 if (breakpoint->length == 4)
2137 /* keep the original instruction in target endianness */
2138 if ((retval = target_read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2142 /* write the bkpt instruction in target endianness (xscale->arm_bkpt is host endian) */
2143 if ((retval = target_write_u32(target, breakpoint->address, xscale->arm_bkpt)) != ERROR_OK)
2150 /* keep the original instruction in target endianness */
2151 if ((retval = target_read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2155 /* write the bkpt instruction in target endianness (xscale->thumb_bkpt is host endian) */
2156 if ((retval = target_write_u32(target, breakpoint->address, xscale->thumb_bkpt)) != ERROR_OK)
2161 breakpoint->set = 1;
/* Validate and account for a new breakpoint request.
 * Hardware breakpoints consume one of the two IBCR comparators; only
 * 2-byte (Thumb) and 4-byte (ARM) lengths are accepted.  The actual
 * arming happens later in xscale_set_breakpoint(). */
2167 static int xscale_add_breakpoint(struct target *target,
2168 struct breakpoint *breakpoint)
2170 struct xscale_common *xscale = target_to_xscale(target);
2172 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
2174 LOG_INFO("no breakpoint unit available for hardware breakpoint");
2175 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2178 if ((breakpoint->length != 2) && (breakpoint->length != 4))
2180 LOG_INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2181 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2184 if (breakpoint->type == BKPT_HARD)
2186 xscale->ibcr_available--;
/* Disarm a breakpoint on the target.
 * Hardware: clears whichever IBCR comparator breakpoint->set recorded.
 * Software: restores the saved original instruction (2 or 4 bytes).
 * Clears breakpoint->set on success. */
2192 static int xscale_unset_breakpoint(struct target *target,
2193 struct breakpoint *breakpoint)
2196 struct xscale_common *xscale = target_to_xscale(target);
2198 if (target->state != TARGET_HALTED)
2200 LOG_WARNING("target not halted");
2201 return ERROR_TARGET_NOT_HALTED;
2204 if (!breakpoint->set)
2206 LOG_WARNING("breakpoint not set");
2210 if (breakpoint->type == BKPT_HARD)
2212 if (breakpoint->set == 1)
2214 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2215 xscale->ibcr0_used = 0;
2217 else if (breakpoint->set == 2)
2219 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2220 xscale->ibcr1_used = 0;
2222 breakpoint->set = 0;
2226 /* restore original instruction (kept in target endianness) */
2227 if (breakpoint->length == 4)
2229 if ((retval = target_write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
2236 if ((retval = target_write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
2241 breakpoint->set = 0;
/* Remove a breakpoint: disarm it if currently set, and return its
 * IBCR comparator to the available pool for hardware breakpoints. */
2247 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2249 struct xscale_common *xscale = target_to_xscale(target);
2251 if (target->state != TARGET_HALTED)
2253 LOG_WARNING("target not halted");
2254 return ERROR_TARGET_NOT_HALTED;
2257 if (breakpoint->set)
2259 xscale_unset_breakpoint(target, breakpoint);
2262 if (breakpoint->type == BKPT_HARD)
2263 xscale->ibcr_available++;
/* Arm a watchpoint using one of the two DBR address comparators.
 * The access type (read/write/access) selects the enable bits written
 * into DBCON; DBR1's enable field sits two bits higher than DBR0's.
 * watchpoint->set records which comparator was used (1 or 2). */
2268 static int xscale_set_watchpoint(struct target *target,
2269 struct watchpoint *watchpoint)
2271 struct xscale_common *xscale = target_to_xscale(target);
2273 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2274 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2276 if (target->state != TARGET_HALTED)
2278 LOG_WARNING("target not halted");
2279 return ERROR_TARGET_NOT_HALTED;
/* refresh the cached DBCON value from the target before modifying it */
2282 xscale_get_reg(dbcon);
2284 switch (watchpoint->rw)
2296 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2299 if (!xscale->dbr0_used)
2301 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2302 dbcon_value |= enable;
2303 xscale_set_reg_u32(dbcon, dbcon_value);
2304 watchpoint->set = 1;
2305 xscale->dbr0_used = 1;
2307 else if (!xscale->dbr1_used)
2309 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
/* DBR1 enable bits live two positions above DBR0's in DBCON */
2310 dbcon_value |= enable << 2;
2311 xscale_set_reg_u32(dbcon, dbcon_value);
2312 watchpoint->set = 2;
2313 xscale->dbr1_used = 1;
/* xscale_add_watchpoint() tracks availability, so this can't happen */
2317 LOG_ERROR("BUG: no hardware comparator available");
/* Validate and account for a new watchpoint request: requires a free
 * DBR comparator and a length of 1, 2 or 4 bytes.  The actual arming
 * happens later in xscale_set_watchpoint(). */
2324 static int xscale_add_watchpoint(struct target *target,
2325 struct watchpoint *watchpoint)
2327 struct xscale_common *xscale = target_to_xscale(target);
2329 if (xscale->dbr_available < 1)
2331 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2334 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))
2336 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2339 xscale->dbr_available--;
2344 static int xscale_unset_watchpoint(struct target *target,
2345 struct watchpoint *watchpoint)
2347 struct xscale_common *xscale = target_to_xscale(target);
2348 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2349 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2351 if (target->state != TARGET_HALTED)
2353 LOG_WARNING("target not halted");
2354 return ERROR_TARGET_NOT_HALTED;
2357 if (!watchpoint->set)
2359 LOG_WARNING("breakpoint not set");
2363 if (watchpoint->set == 1)
2365 dbcon_value &= ~0x3;
2366 xscale_set_reg_u32(dbcon, dbcon_value);
2367 xscale->dbr0_used = 0;
2369 else if (watchpoint->set == 2)
2371 dbcon_value &= ~0xc;
2372 xscale_set_reg_u32(dbcon, dbcon_value);
2373 xscale->dbr1_used = 0;
2375 watchpoint->set = 0;
/* Remove a watchpoint: disarm it if currently set, and return its DBR
 * comparator to the available pool. */
2380 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2382 struct xscale_common *xscale = target_to_xscale(target);
2384 if (target->state != TARGET_HALTED)
2386 LOG_WARNING("target not halted");
2387 return ERROR_TARGET_NOT_HALTED;
2390 if (watchpoint->set)
2392 xscale_unset_watchpoint(target, watchpoint);
2395 xscale->dbr_available++;
/* Read an XScale debug register into the register cache.
 * DCSR and TX are read directly over JTAG; RX and TXRXCTRL cannot be
 * read.  All other debug registers are fetched through the debug
 * handler (command 0x40 + register number, value returned via TX). */
2400 static int xscale_get_reg(struct reg *reg)
2402 struct xscale_reg *arch_info = reg->arch_info;
2403 struct target *target = arch_info->target;
2404 struct xscale_common *xscale = target_to_xscale(target);
2406 /* DCSR, TX and RX are accessible via JTAG */
2407 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2409 return xscale_read_dcsr(arch_info->target);
2411 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2413 /* 1 = consume register content */
2414 return xscale_read_tx(arch_info->target, 1);
2416 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2418 /* can't read from RX register (host -> debug handler) */
2421 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2423 /* can't (explicitly) read from TXRXCTRL register */
2426 else /* Other DBG registers have to be transfered by the debug handler */
2428 /* send CP read request (command 0x40) */
2429 xscale_send_u32(target, 0x40);
2431 /* send CP register number */
2432 xscale_send_u32(target, arch_info->dbg_handler_number);
2434 /* read register value */
2435 xscale_read_tx(target, 1);
2436 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
/* Write an XScale debug register from a host buffer.
 * DCSR and RX are written directly over JTAG; TX and TXRXCTRL cannot
 * be written.  All other debug registers go through the debug handler
 * (command 0x41 + register number + value), and the cache is updated. */
2445 static int xscale_set_reg(struct reg *reg, uint8_t* buf)
2447 struct xscale_reg *arch_info = reg->arch_info;
2448 struct target *target = arch_info->target;
2449 struct xscale_common *xscale = target_to_xscale(target);
2450 uint32_t value = buf_get_u32(buf, 0, 32);
2452 /* DCSR, TX and RX are accessible via JTAG */
2453 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2455 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2456 return xscale_write_dcsr(arch_info->target, -1, -1);
2458 else if (strcmp(reg->name, "XSCALE_RX") == 0)
2460 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2461 return xscale_write_rx(arch_info->target);
2463 else if (strcmp(reg->name, "XSCALE_TX") == 0)
2465 /* can't write to TX register (debug-handler -> host) */
2468 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)
2470 /* can't (explicitly) write to TXRXCTRL register */
2473 else /* Other DBG registers have to be transfered by the debug handler */
2475 /* send CP write request (command 0x41) */
2476 xscale_send_u32(target, 0x41);
2478 /* send CP register number */
2479 xscale_send_u32(target, arch_info->dbg_handler_number);
2481 /* send CP register value */
2482 xscale_send_u32(target, value);
2483 buf_set_u32(reg->value, 0, 32, value);
/* Write DCSR through the debug handler ("software" path, command 0x41)
 * rather than directly over JTAG; updates the cached value as well. */
2489 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2491 struct xscale_common *xscale = target_to_xscale(target);
2492 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2493 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2495 /* send CP write request (command 0x41) */
2496 xscale_send_u32(target, 0x41);
2498 /* send CP register number */
2499 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2501 /* send CP register value */
2502 xscale_send_u32(target, value);
2503 buf_set_u32(dcsr->value, 0, 32, value);
/* Fetch the on-chip trace buffer and append it to the trace data list.
 *
 * The debug handler returns 258 words: 256 trace entries plus two
 * checkpoint addresses.  The buffer is scanned backwards to classify
 * entries as address bytes vs. message bytes (indirect/checkpointed
 * branch messages are each followed by four address bytes), then a new
 * xscale_trace_data node is appended holding the non-empty tail.
 * NOTE(review): the two malloc() results below are used unchecked —
 * consider handling allocation failure. */
2508 static int xscale_read_trace(struct target *target)
2510 struct xscale_common *xscale = target_to_xscale(target);
2511 struct arm *armv4_5 = &xscale->armv4_5_common;
2512 struct xscale_trace_data **trace_data_p;
2514 /* 258 words from debug handler
2515 * 256 trace buffer entries
2516 * 2 checkpoint addresses
2518 uint32_t trace_buffer[258];
2519 int is_address[256];
2522 if (target->state != TARGET_HALTED)
2524 LOG_WARNING("target must be stopped to read trace data");
2525 return ERROR_TARGET_NOT_HALTED;
2528 /* send read trace buffer command (command 0x61) */
2529 xscale_send_u32(target, 0x61);
2531 /* receive trace buffer content */
2532 xscale_receive(target, trace_buffer, 258);
2534 /* parse buffer backwards to identify address entries */
2535 for (i = 255; i >= 0; i--)
/* 0x9n / 0xDn = indirect / checkpointed-indirect branch message:
 * the four preceding entries are the branch target address bytes */
2538 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2539 ((trace_buffer[i] & 0xf0) == 0xd0))
2542 is_address[--i] = 1;
2544 is_address[--i] = 1;
2546 is_address[--i] = 1;
2548 is_address[--i] = 1;
2553 /* search first non-zero entry */
2554 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2559 LOG_DEBUG("no trace data collected");
2560 return ERROR_XSCALE_NO_TRACE_DATA;
/* walk to the tail of the trace data list before appending */
2563 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)
2566 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2567 (*trace_data_p)->next = NULL;
2568 (*trace_data_p)->chkpt0 = trace_buffer[256];
2569 (*trace_data_p)->chkpt1 = trace_buffer[257];
2570 (*trace_data_p)->last_instruction =
2571 buf_get_u32(armv4_5->pc->value, 0, 32);
2572 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2573 (*trace_data_p)->depth = 256 - j;
2575 for (i = j; i < 256; i++)
2577 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2579 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2581 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
/* Decode the instruction at trace.current_pc from the loaded trace
 * image: locate the containing image section, read 4 (ARM) or 2
 * (Thumb) bytes depending on trace.core_state, and disassemble it into
 * *instruction.  Fails if no image is loaded or the PC lies outside
 * every section. */
2587 static int xscale_read_instruction(struct target *target,
2588 struct arm_instruction *instruction)
2590 struct xscale_common *xscale = target_to_xscale(target);
2597 if (!xscale->trace.image)
2598 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2600 /* search for the section the current instruction belongs to */
2601 for (i = 0; i < xscale->trace.image->num_sections; i++)
2603 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&
2604 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))
2613 /* current instruction couldn't be found in the image */
2614 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2617 if (xscale->trace.core_state == ARM_STATE_ARM)
/* ARM state: 32-bit opcode */
2620 if ((retval = image_read_section(xscale->trace.image, section,
2621 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2622 4, buf, &size_read)) != ERROR_OK)
2624 LOG_ERROR("error while reading instruction: %i", retval);
2625 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2627 opcode = target_buffer_get_u32(target, buf);
2628 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2630 else if (xscale->trace.core_state == ARM_STATE_THUMB)
/* Thumb state: 16-bit opcode */
2633 if ((retval = image_read_section(xscale->trace.image, section,
2634 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,
2635 2, buf, &size_read)) != ERROR_OK)
2637 LOG_ERROR("error while reading instruction: %i", retval);
2638 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2640 opcode = target_buffer_get_u16(target, buf);
2641 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);
2645 LOG_ERROR("BUG: unknown core state encountered");
2652 static int xscale_branch_address(struct xscale_trace_data *trace_data,
2653 int i, uint32_t *target)
2655 /* if there are less than four entries prior to the indirect branch message
2656 * we can't extract the address */
2662 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2663 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2668 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2670 struct xscale_common *xscale = target_to_xscale(target);
2672 uint32_t next_pc = 0x0;
2673 struct xscale_trace_data *trace_data = xscale->trace.data;
2682 xscale->trace.core_state = ARM_STATE_ARM;
2687 for (i = 0; i < trace_data->depth; i++)
2693 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2696 switch ((trace_data->entries[i].data & 0xf0) >> 4)
2698 case 0: /* Exceptions */
2706 exception = (trace_data->entries[i].data & 0x70) >> 4;
2708 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;
2709 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);
2711 case 8: /* Direct Branch */
2714 case 9: /* Indirect Branch */
2716 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2721 case 13: /* Checkpointed Indirect Branch */
2722 if (xscale_branch_address(trace_data, i, &next_pc) == 0)
2725 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))
2726 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))
2727 LOG_WARNING("checkpointed indirect branch target address doesn't match checkpoint");
2729 /* explicit fall-through */
2730 case 12: /* Checkpointed Direct Branch */
2735 next_pc = trace_data->chkpt0;
2738 else if (chkpt == 1)
2741 next_pc = trace_data->chkpt0;
2746 LOG_WARNING("more than two checkpointed branches encountered");
2749 case 15: /* Roll-over */
2752 default: /* Reserved */
2753 command_print(cmd_ctx, "--- reserved trace message ---");
2754 LOG_ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);
2758 if (xscale->trace.pc_ok)
2760 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;
2761 struct arm_instruction instruction;
2763 if ((exception == 6) || (exception == 7))
2765 /* IRQ or FIQ exception, no instruction executed */
2769 while (executed-- >= 0)
2771 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2773 /* can't continue tracing with no image available */
2774 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2778 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2780 /* TODO: handle incomplete images */
2784 /* a precise abort on a load to the PC is included in the incremental
2785 * word count, other instructions causing data aborts are not included
2787 if ((executed == 0) && (exception == 4)
2788 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))
2790 if ((instruction.type == ARM_LDM)
2791 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))
2795 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))
2796 && (instruction.info.load_store.Rd != 15))
2802 /* only the last instruction executed
2803 * (the one that caused the control flow change)
2804 * could be a taken branch
2806 if (((executed == -1) && (branch == 1)) &&
2807 (((instruction.type == ARM_B) ||
2808 (instruction.type == ARM_BL) ||
2809 (instruction.type == ARM_BLX)) &&
2810 (instruction.info.b_bl_bx_blx.target_address != 0xffffffff)))
2812 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;
2816 xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2;
2818 command_print(cmd_ctx, "%s", instruction.text);
2826 xscale->trace.current_pc = next_pc;
2827 xscale->trace.pc_ok = 1;
2831 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARM_STATE_ARM) ? 4 : 2)
2833 struct arm_instruction instruction;
2834 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)
2836 /* can't continue tracing with no image available */
2837 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)
2841 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)
2843 /* TODO: handle incomplete images */
2846 command_print(cmd_ctx, "%s", instruction.text);
2849 trace_data = trace_data->next;
2855 static const struct reg_arch_type xscale_reg_type = {
2856 .get = xscale_get_reg,
2857 .set = xscale_set_reg,
2860 static void xscale_build_reg_cache(struct target *target)
2862 struct xscale_common *xscale = target_to_xscale(target);
2863 struct arm *armv4_5 = &xscale->armv4_5_common;
2864 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2865 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2867 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2869 (*cache_p) = arm_build_reg_cache(target, armv4_5);
2871 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2872 cache_p = &(*cache_p)->next;
2874 /* fill in values for the xscale reg cache */
2875 (*cache_p)->name = "XScale registers";
2876 (*cache_p)->next = NULL;
2877 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2878 (*cache_p)->num_regs = num_regs;
2880 for (i = 0; i < num_regs; i++)
2882 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2883 (*cache_p)->reg_list[i].value = calloc(4, 1);
2884 (*cache_p)->reg_list[i].dirty = 0;
2885 (*cache_p)->reg_list[i].valid = 0;
2886 (*cache_p)->reg_list[i].size = 32;
2887 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2888 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2889 arch_info[i] = xscale_reg_arch_info[i];
2890 arch_info[i].target = target;
2893 xscale->reg_cache = (*cache_p);
2896 static int xscale_init_target(struct command_context *cmd_ctx,
2897 struct target *target)
2899 xscale_build_reg_cache(target);
2903 static int xscale_init_arch_info(struct target *target,
2904 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2906 struct arm *armv4_5;
2907 uint32_t high_reset_branch, low_reset_branch;
2910 armv4_5 = &xscale->armv4_5_common;
2912 /* store architecture specfic data */
2913 xscale->common_magic = XSCALE_COMMON_MAGIC;
2915 /* we don't really *need* a variant param ... */
2919 if (strcmp(variant, "pxa250") == 0
2920 || strcmp(variant, "pxa255") == 0
2921 || strcmp(variant, "pxa26x") == 0)
2923 else if (strcmp(variant, "pxa27x") == 0
2924 || strcmp(variant, "ixp42x") == 0
2925 || strcmp(variant, "ixp45x") == 0
2926 || strcmp(variant, "ixp46x") == 0)
2928 else if (strcmp(variant, "pxa3xx") == 0)
2931 LOG_WARNING("%s: unrecognized variant %s",
2932 tap->dotted_name, variant);
2934 if (ir_length && ir_length != tap->ir_length) {
2935 LOG_WARNING("%s: IR length for %s is %d; fixing",
2936 tap->dotted_name, variant, ir_length);
2937 tap->ir_length = ir_length;
2941 /* PXA3xx shifts the JTAG instructions */
2942 if (tap->ir_length == 11)
2943 xscale->xscale_variant = XSCALE_PXA3XX;
2945 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2947 /* the debug handler isn't installed (and thus not running) at this time */
2948 xscale->handler_address = 0xfe000800;
2950 /* clear the vectors we keep locally for reference */
2951 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2952 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2954 /* no user-specified vectors have been configured yet */
2955 xscale->static_low_vectors_set = 0x0;
2956 xscale->static_high_vectors_set = 0x0;
2958 /* calculate branches to debug handler */
2959 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2960 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2962 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2963 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
2965 for (i = 1; i <= 7; i++)
2967 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2968 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2971 /* 64kB aligned region used for DCache cleaning */
2972 xscale->cache_clean_address = 0xfffe0000;
2974 xscale->hold_rst = 0;
2975 xscale->external_debug_break = 0;
2977 xscale->ibcr_available = 2;
2978 xscale->ibcr0_used = 0;
2979 xscale->ibcr1_used = 0;
2981 xscale->dbr_available = 2;
2982 xscale->dbr0_used = 0;
2983 xscale->dbr1_used = 0;
2985 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2986 target_name(target));
2988 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2989 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
2991 xscale->vector_catch = 0x1;
2993 xscale->trace.capture_status = TRACE_IDLE;
2994 xscale->trace.data = NULL;
2995 xscale->trace.image = NULL;
2996 xscale->trace.buffer_enabled = 0;
2997 xscale->trace.buffer_fill = 0;
2999 /* prepare ARMv4/5 specific information */
3000 armv4_5->arch_info = xscale;
3001 armv4_5->read_core_reg = xscale_read_core_reg;
3002 armv4_5->write_core_reg = xscale_write_core_reg;
3003 armv4_5->full_context = xscale_full_context;
3005 arm_init_arch_info(target, armv4_5);
3007 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3008 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3009 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3010 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3011 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3012 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3013 xscale->armv4_5_mmu.has_tiny_pages = 1;
3014 xscale->armv4_5_mmu.mmu_enabled = 0;
3019 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3021 struct xscale_common *xscale;
3023 if (sizeof xscale_debug_handler - 1 > 0x800) {
3024 LOG_ERROR("debug_handler.bin: larger than 2kb");
3028 xscale = calloc(1, sizeof(*xscale));
3032 return xscale_init_arch_info(target, xscale, target->tap,
3036 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3038 struct target *target = NULL;
3039 struct xscale_common *xscale;
3041 uint32_t handler_address;
3045 LOG_ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");
3049 if ((target = get_target(CMD_ARGV[0])) == NULL)
3051 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3055 xscale = target_to_xscale(target);
3056 retval = xscale_verify_pointer(CMD_CTX, xscale);
3057 if (retval != ERROR_OK)
3060 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3062 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3063 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3065 xscale->handler_address = handler_address;
3069 LOG_ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3076 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3078 struct target *target = NULL;
3079 struct xscale_common *xscale;
3081 uint32_t cache_clean_address;
3085 return ERROR_COMMAND_SYNTAX_ERROR;
3088 target = get_target(CMD_ARGV[0]);
3091 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3094 xscale = target_to_xscale(target);
3095 retval = xscale_verify_pointer(CMD_CTX, xscale);
3096 if (retval != ERROR_OK)
3099 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3101 if (cache_clean_address & 0xffff)
3103 LOG_ERROR("xscale cache_clean_address <address> must be 64kb aligned");
3107 xscale->cache_clean_address = cache_clean_address;
3113 COMMAND_HANDLER(xscale_handle_cache_info_command)
3115 struct target *target = get_current_target(CMD_CTX);
3116 struct xscale_common *xscale = target_to_xscale(target);
3119 retval = xscale_verify_pointer(CMD_CTX, xscale);
3120 if (retval != ERROR_OK)
3123 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3126 static int xscale_virt2phys(struct target *target,
3127 uint32_t virtual, uint32_t *physical)
3129 struct xscale_common *xscale = target_to_xscale(target);
3135 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3136 LOG_ERROR(xscale_not);
3137 return ERROR_TARGET_INVALID;
3140 uint32_t ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
3149 static int xscale_mmu(struct target *target, int *enabled)
3151 struct xscale_common *xscale = target_to_xscale(target);
3153 if (target->state != TARGET_HALTED)
3155 LOG_ERROR("Target not halted");
3156 return ERROR_TARGET_INVALID;
3158 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3162 COMMAND_HANDLER(xscale_handle_mmu_command)
3164 struct target *target = get_current_target(CMD_CTX);
3165 struct xscale_common *xscale = target_to_xscale(target);
3168 retval = xscale_verify_pointer(CMD_CTX, xscale);
3169 if (retval != ERROR_OK)
3172 if (target->state != TARGET_HALTED)
3174 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3181 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3183 xscale_enable_mmu_caches(target, 1, 0, 0);
3185 xscale_disable_mmu_caches(target, 1, 0, 0);
3186 xscale->armv4_5_mmu.mmu_enabled = enable;
3189 command_print(CMD_CTX, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3194 COMMAND_HANDLER(xscale_handle_idcache_command)
3196 struct target *target = get_current_target(CMD_CTX);
3197 struct xscale_common *xscale = target_to_xscale(target);
3199 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3200 if (retval != ERROR_OK)
3203 if (target->state != TARGET_HALTED)
3205 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3209 bool icache = false;
3210 if (strcmp(CMD_NAME, "icache") == 0)
3215 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3217 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3219 xscale_enable_mmu_caches(target, 0, 0, 1);
3221 xscale_disable_mmu_caches(target, 0, 0, 1);
3223 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3225 xscale_enable_mmu_caches(target, 0, 1, 0);
3227 xscale_disable_mmu_caches(target, 0, 1, 0);
3231 bool enabled = icache ?
3232 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3233 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3234 const char *msg = enabled ? "enabled" : "disabled";
3235 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3240 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3242 struct target *target = get_current_target(CMD_CTX);
3243 struct xscale_common *xscale = target_to_xscale(target);
3246 retval = xscale_verify_pointer(CMD_CTX, xscale);
3247 if (retval != ERROR_OK)
3252 command_print(CMD_CTX, "usage: xscale vector_catch [mask]");
3256 COMMAND_PARSE_NUMBER(u8, CMD_ARGV[0], xscale->vector_catch);
3257 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);
3258 xscale_write_dcsr(target, -1, -1);
3261 command_print(CMD_CTX, "vector catch mask: 0x%2.2x", xscale->vector_catch);
3267 COMMAND_HANDLER(xscale_handle_vector_table_command)
3269 struct target *target = get_current_target(CMD_CTX);
3270 struct xscale_common *xscale = target_to_xscale(target);
3274 retval = xscale_verify_pointer(CMD_CTX, xscale);
3275 if (retval != ERROR_OK)
3278 if (CMD_ARGC == 0) /* print current settings */
3282 command_print(CMD_CTX, "active user-set static vectors:");
3283 for (idx = 1; idx < 8; idx++)
3284 if (xscale->static_low_vectors_set & (1 << idx))
3285 command_print(CMD_CTX, "low %d: 0x%" PRIx32, idx, xscale->static_low_vectors[idx]);
3286 for (idx = 1; idx < 8; idx++)
3287 if (xscale->static_high_vectors_set & (1 << idx))
3288 command_print(CMD_CTX, "high %d: 0x%" PRIx32, idx, xscale->static_high_vectors[idx]);
3297 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3299 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3301 if (idx < 1 || idx >= 8)
3304 if (!err && strcmp(CMD_ARGV[0], "low") == 0)
3306 xscale->static_low_vectors_set |= (1<<idx);
3307 xscale->static_low_vectors[idx] = vec;
3309 else if (!err && (strcmp(CMD_ARGV[0], "high") == 0))
3311 xscale->static_high_vectors_set |= (1<<idx);
3312 xscale->static_high_vectors[idx] = vec;
3319 command_print(CMD_CTX, "usage: xscale vector_table <high|low> <index> <code>");
3325 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3327 struct target *target = get_current_target(CMD_CTX);
3328 struct xscale_common *xscale = target_to_xscale(target);
3329 struct arm *armv4_5 = &xscale->armv4_5_common;
3330 uint32_t dcsr_value;
3333 retval = xscale_verify_pointer(CMD_CTX, xscale);
3334 if (retval != ERROR_OK)
3337 if (target->state != TARGET_HALTED)
3339 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3343 if ((CMD_ARGC >= 1) && (strcmp("enable", CMD_ARGV[0]) == 0))
3345 struct xscale_trace_data *td, *next_td;
3346 xscale->trace.buffer_enabled = 1;
3348 /* free old trace data */
3349 td = xscale->trace.data;
3359 xscale->trace.data = NULL;
3361 else if ((CMD_ARGC >= 1) && (strcmp("disable", CMD_ARGV[0]) == 0))
3363 xscale->trace.buffer_enabled = 0;
3366 if ((CMD_ARGC >= 2) && (strcmp("fill", CMD_ARGV[1]) == 0))
3370 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], fill);
3371 xscale->trace.buffer_fill = fill;
3373 else if ((CMD_ARGC >= 2) && (strcmp("wrap", CMD_ARGV[1]) == 0))
3375 xscale->trace.buffer_fill = -1;
3378 if (xscale->trace.buffer_enabled)
3380 /* if we enable the trace buffer in fill-once
3381 * mode we know the address of the first instruction */
3382 xscale->trace.pc_ok = 1;
3383 xscale->trace.current_pc =
3384 buf_get_u32(armv4_5->pc->value, 0, 32);
3388 /* otherwise the address is unknown, and we have no known good PC */
3389 xscale->trace.pc_ok = 0;
3392 command_print(CMD_CTX, "trace buffer %s (%s)",
3393 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",
3394 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap");
3396 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3397 if (xscale->trace.buffer_fill >= 0)
3398 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3400 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
3405 COMMAND_HANDLER(xscale_handle_trace_image_command)
3407 struct target *target = get_current_target(CMD_CTX);
3408 struct xscale_common *xscale = target_to_xscale(target);
3413 command_print(CMD_CTX, "usage: xscale trace_image <file> [base address] [type]");
3417 retval = xscale_verify_pointer(CMD_CTX, xscale);
3418 if (retval != ERROR_OK)
3421 if (xscale->trace.image)
3423 image_close(xscale->trace.image);
3424 free(xscale->trace.image);
3425 command_print(CMD_CTX, "previously loaded image found and closed");
3428 xscale->trace.image = malloc(sizeof(struct image));
3429 xscale->trace.image->base_address_set = 0;
3430 xscale->trace.image->start_address_set = 0;
3432 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3435 xscale->trace.image->base_address_set = 1;
3436 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3440 xscale->trace.image->base_address_set = 0;
3443 if (image_open(xscale->trace.image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3445 free(xscale->trace.image);
3446 xscale->trace.image = NULL;
3453 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3455 struct target *target = get_current_target(CMD_CTX);
3456 struct xscale_common *xscale = target_to_xscale(target);
3457 struct xscale_trace_data *trace_data;
3461 retval = xscale_verify_pointer(CMD_CTX, xscale);
3462 if (retval != ERROR_OK)
3465 if (target->state != TARGET_HALTED)
3467 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3473 command_print(CMD_CTX, "usage: xscale dump_trace <file>");
3477 trace_data = xscale->trace.data;
3481 command_print(CMD_CTX, "no trace data collected");
3485 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3494 fileio_write_u32(&file, trace_data->chkpt0);
3495 fileio_write_u32(&file, trace_data->chkpt1);
3496 fileio_write_u32(&file, trace_data->last_instruction);
3497 fileio_write_u32(&file, trace_data->depth);
3499 for (i = 0; i < trace_data->depth; i++)
3500 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));
3502 trace_data = trace_data->next;
3505 fileio_close(&file);
3510 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3512 struct target *target = get_current_target(CMD_CTX);
3513 struct xscale_common *xscale = target_to_xscale(target);
3516 retval = xscale_verify_pointer(CMD_CTX, xscale);
3517 if (retval != ERROR_OK)
3520 xscale_analyze_trace(target, CMD_CTX);
3525 COMMAND_HANDLER(xscale_handle_cp15)
3527 struct target *target = get_current_target(CMD_CTX);
3528 struct xscale_common *xscale = target_to_xscale(target);
3531 retval = xscale_verify_pointer(CMD_CTX, xscale);
3532 if (retval != ERROR_OK)
3535 if (target->state != TARGET_HALTED)
3537 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3540 uint32_t reg_no = 0;
3541 struct reg *reg = NULL;
3544 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3545 /*translate from xscale cp15 register no to openocd register*/
3549 reg_no = XSCALE_MAINID;
3552 reg_no = XSCALE_CTRL;
3555 reg_no = XSCALE_TTB;
3558 reg_no = XSCALE_DAC;
3561 reg_no = XSCALE_FSR;
3564 reg_no = XSCALE_FAR;
3567 reg_no = XSCALE_PID;
3570 reg_no = XSCALE_CPACCESS;
3573 command_print(CMD_CTX, "invalid register number");
3574 return ERROR_INVALID_ARGUMENTS;
3576 reg = &xscale->reg_cache->reg_list[reg_no];
3583 /* read cp15 control register */
3584 xscale_get_reg(reg);
3585 value = buf_get_u32(reg->value, 0, 32);
3586 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size), value);
3588 else if (CMD_ARGC == 2)
3591 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3593 /* send CP write request (command 0x41) */
3594 xscale_send_u32(target, 0x41);
3596 /* send CP register number */
3597 xscale_send_u32(target, reg_no);
3599 /* send CP register value */
3600 xscale_send_u32(target, value);
3602 /* execute cpwait to ensure outstanding operations complete */
3603 xscale_send_u32(target, 0x53);
3607 command_print(CMD_CTX, "usage: cp15 [register]<, [value]>");
3613 static const struct command_registration xscale_exec_command_handlers[] = {
3615 .name = "cache_info",
3616 .handler = xscale_handle_cache_info_command,
3617 .mode = COMMAND_EXEC,
3618 .help = "display information about CPU caches",
3622 .handler = xscale_handle_mmu_command,
3623 .mode = COMMAND_EXEC,
3624 .help = "enable or disable the MMU",
3625 .usage = "['enable'|'disable']",
3629 .handler = xscale_handle_idcache_command,
3630 .mode = COMMAND_EXEC,
3631 .help = "display ICache state, optionally enabling or "
3633 .usage = "['enable'|'disable']",
3637 .handler = xscale_handle_idcache_command,
3638 .mode = COMMAND_EXEC,
3639 .help = "display DCache state, optionally enabling or "
3641 .usage = "['enable'|'disable']",
3644 .name = "vector_catch",
3645 .handler = xscale_handle_vector_catch_command,
3646 .mode = COMMAND_EXEC,
3647 .help = "set or display 8-bit mask of vectors "
3648 "that should trigger debug entry",
3652 .name = "vector_table",
3653 .handler = xscale_handle_vector_table_command,
3654 .mode = COMMAND_EXEC,
3655 .help = "set vector table entry in mini-ICache, "
3656 "or display current tables",
3657 .usage = "[('high'|'low') index code]",
3660 .name = "trace_buffer",
3661 .handler = xscale_handle_trace_buffer_command,
3662 .mode = COMMAND_EXEC,
3663 .help = "display trace buffer status, enable or disable "
3664 "tracing, and optionally reconfigure trace mode",
3665 .usage = "['enable'|'disable' ['fill' number|'wrap']]",
3668 .name = "dump_trace",
3669 .handler = xscale_handle_dump_trace_command,
3670 .mode = COMMAND_EXEC,
3671 .help = "dump content of trace buffer to file",
3672 .usage = "filename",
3675 .name = "analyze_trace",
3676 .handler = xscale_handle_analyze_trace_buffer_command,
3677 .mode = COMMAND_EXEC,
3678 .help = "analyze content of trace buffer",
3682 .name = "trace_image",
3683 .handler = xscale_handle_trace_image_command,
3684 .mode = COMMAND_EXEC,
3685 .help = "load image from file to address (default 0)",
3686 .usage = "filename [offset [filetype]]",
3690 .handler = xscale_handle_cp15,
3691 .mode = COMMAND_EXEC,
3692 .help = "Read or write coprocessor 15 register.",
3693 .usage = "register [value]",
3695 COMMAND_REGISTRATION_DONE
3697 static const struct command_registration xscale_any_command_handlers[] = {
3699 .name = "debug_handler",
3700 .handler = xscale_handle_debug_handler_command,
3701 .mode = COMMAND_ANY,
3702 .help = "Change address used for debug handler.",
3703 .usage = "target address",
3706 .name = "cache_clean_address",
3707 .handler = xscale_handle_cache_clean_address_command,
3708 .mode = COMMAND_ANY,
3709 .help = "Change address used for cleaning data cache.",
3713 .chain = xscale_exec_command_handlers,
3715 COMMAND_REGISTRATION_DONE
3717 static const struct command_registration xscale_command_handlers[] = {
3719 .chain = arm_command_handlers,
3723 .mode = COMMAND_ANY,
3724 .help = "xscale command group",
3725 .chain = xscale_any_command_handlers,
3727 COMMAND_REGISTRATION_DONE
3730 struct target_type xscale_target =
3734 .poll = xscale_poll,
3735 .arch_state = xscale_arch_state,
3737 .target_request_data = NULL,
3739 .halt = xscale_halt,
3740 .resume = xscale_resume,
3741 .step = xscale_step,
3743 .assert_reset = xscale_assert_reset,
3744 .deassert_reset = xscale_deassert_reset,
3745 .soft_reset_halt = NULL,
3747 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3748 .get_gdb_reg_list = arm_get_gdb_reg_list,
3750 .read_memory = xscale_read_memory,
3751 .read_phys_memory = xscale_read_phys_memory,
3752 .write_memory = xscale_write_memory,
3753 .write_phys_memory = xscale_write_phys_memory,
3754 .bulk_write_memory = xscale_bulk_write_memory,
3756 .checksum_memory = arm_checksum_memory,
3757 .blank_check_memory = arm_blank_check_memory,
3759 .run_algorithm = armv4_5_run_algorithm,
3761 .add_breakpoint = xscale_add_breakpoint,
3762 .remove_breakpoint = xscale_remove_breakpoint,
3763 .add_watchpoint = xscale_add_watchpoint,
3764 .remove_watchpoint = xscale_remove_watchpoint,
3766 .commands = xscale_command_handlers,
3767 .target_create = xscale_target_create,
3768 .init_target = xscale_init_target,
3770 .virt2phys = xscale_virt2phys,