1 /***************************************************************************
\r
2 * Copyright (C) 2006, 2007 by Dominic Rath *
\r
3 * Dominic.Rath@gmx.de *
\r
5 * This program is free software; you can redistribute it and/or modify *
\r
6 * it under the terms of the GNU General Public License as published by *
\r
7 * the Free Software Foundation; either version 2 of the License, or *
\r
8 * (at your option) any later version. *
\r
10 * This program is distributed in the hope that it will be useful, *
\r
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
\r
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
\r
13 * GNU General Public License for more details. *
\r
15 * You should have received a copy of the GNU General Public License *
\r
16 * along with this program; if not, write to the *
\r
17 * Free Software Foundation, Inc., *
\r
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
\r
19 ***************************************************************************/
\r
20 #ifdef HAVE_CONFIG_H
\r
24 #include "replacements.h"
\r
28 #include "register.h"
\r
30 #include "armv4_5.h"
\r
31 #include "arm_simulator.h"
\r
32 #include "arm_disassembler.h"
\r
35 #include "binarybuffer.h"
\r
36 #include "time_support.h"
\r
37 #include "breakpoints.h"
\r
43 #include <sys/types.h>
\r
49 int xscale_register_commands(struct command_context_s *cmd_ctx);
\r
51 /* forward declarations */
\r
52 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target);
\r
53 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target);
\r
56 int xscale_arch_state(struct target_s *target);
\r
57 int xscale_poll(target_t *target);
\r
58 int xscale_halt(target_t *target);
\r
59 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution);
\r
60 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints);
\r
61 int xscale_debug_entry(target_t *target);
\r
62 int xscale_restore_context(target_t *target);
\r
64 int xscale_assert_reset(target_t *target);
\r
65 int xscale_deassert_reset(target_t *target);
\r
66 int xscale_soft_reset_halt(struct target_s *target);
\r
67 int xscale_prepare_reset_halt(struct target_s *target);
\r
69 int xscale_set_reg_u32(reg_t *reg, u32 value);
\r
71 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode);
\r
72 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value);
\r
74 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
\r
75 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer);
\r
76 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer);
\r
77 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum);
\r
79 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
\r
80 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
\r
81 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
\r
82 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
\r
83 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
\r
84 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint);
\r
85 void xscale_enable_watchpoints(struct target_s *target);
\r
86 void xscale_enable_breakpoints(struct target_s *target);
\r
87 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical);
\r
88 static int xscale_mmu(struct target_s *target, int *enabled);
\r
90 int xscale_read_trace(target_t *target);
\r
92 target_type_t xscale_target =
\r
96 .poll = xscale_poll,
\r
97 .arch_state = xscale_arch_state,
\r
99 .target_request_data = NULL,
\r
101 .halt = xscale_halt,
\r
102 .resume = xscale_resume,
\r
103 .step = xscale_step,
\r
105 .assert_reset = xscale_assert_reset,
\r
106 .deassert_reset = xscale_deassert_reset,
\r
107 .soft_reset_halt = xscale_soft_reset_halt,
\r
108 .prepare_reset_halt = xscale_prepare_reset_halt,
\r
110 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
\r
112 .read_memory = xscale_read_memory,
\r
113 .write_memory = xscale_write_memory,
\r
114 .bulk_write_memory = xscale_bulk_write_memory,
\r
115 .checksum_memory = xscale_checksum_memory,
\r
117 .run_algorithm = armv4_5_run_algorithm,
\r
119 .add_breakpoint = xscale_add_breakpoint,
\r
120 .remove_breakpoint = xscale_remove_breakpoint,
\r
121 .add_watchpoint = xscale_add_watchpoint,
\r
122 .remove_watchpoint = xscale_remove_watchpoint,
\r
124 .register_commands = xscale_register_commands,
\r
125 .target_command = xscale_target_command,
\r
126 .init_target = xscale_init_target,
\r
127 .quit = xscale_quit,
\r
129 .virt2phys = xscale_virt2phys,
\r
133 char* xscale_reg_list[] =
\r
135 "XSCALE_MAINID", /* 0 */
\r
136 "XSCALE_CACHETYPE",
\r
145 "XSCALE_IBCR0", /* 10 */
\r
155 "XSCALE_RX", /* 20 */
\r
159 xscale_reg_t xscale_reg_arch_info[] =
\r
161 {XSCALE_MAINID, NULL},
\r
162 {XSCALE_CACHETYPE, NULL},
\r
163 {XSCALE_CTRL, NULL},
\r
164 {XSCALE_AUXCTRL, NULL},
\r
165 {XSCALE_TTB, NULL},
\r
166 {XSCALE_DAC, NULL},
\r
167 {XSCALE_FSR, NULL},
\r
168 {XSCALE_FAR, NULL},
\r
169 {XSCALE_PID, NULL},
\r
170 {XSCALE_CPACCESS, NULL},
\r
171 {XSCALE_IBCR0, NULL},
\r
172 {XSCALE_IBCR1, NULL},
\r
173 {XSCALE_DBR0, NULL},
\r
174 {XSCALE_DBR1, NULL},
\r
175 {XSCALE_DBCON, NULL},
\r
176 {XSCALE_TBREG, NULL},
\r
177 {XSCALE_CHKPT0, NULL},
\r
178 {XSCALE_CHKPT1, NULL},
\r
179 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
\r
180 {-1, NULL}, /* TX accessed via JTAG */
\r
181 {-1, NULL}, /* RX accessed via JTAG */
\r
182 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
\r
185 int xscale_reg_arch_type = -1;
\r
187 int xscale_get_reg(reg_t *reg);
\r
188 int xscale_set_reg(reg_t *reg, u8 *buf);
\r
190 int xscale_get_arch_pointers(target_t *target, armv4_5_common_t **armv4_5_p, xscale_common_t **xscale_p)
\r
192 armv4_5_common_t *armv4_5 = target->arch_info;
\r
193 xscale_common_t *xscale = armv4_5->arch_info;
\r
195 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
\r
197 ERROR("target isn't an XScale target");
\r
201 if (xscale->common_magic != XSCALE_COMMON_MAGIC)
\r
203 ERROR("target isn't an XScale target");
\r
207 *armv4_5_p = armv4_5;
\r
208 *xscale_p = xscale;
\r
213 int xscale_jtag_set_instr(int chain_pos, u32 new_instr)
\r
215 jtag_device_t *device = jtag_get_device(chain_pos);
\r
217 if (buf_get_u32(device->cur_instr, 0, device->ir_length) != new_instr)
\r
219 scan_field_t field;
\r
221 field.device = chain_pos;
\r
222 field.num_bits = device->ir_length;
\r
223 field.out_value = calloc(CEIL(field.num_bits, 8), 1);
\r
224 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
\r
225 field.out_mask = NULL;
\r
226 field.in_value = NULL;
\r
227 jtag_set_check_value(&field, device->expected, device->expected_mask, NULL);
\r
229 jtag_add_ir_scan(1, &field, -1);
\r
231 free(field.out_value);
\r
237 int xscale_jtag_callback(enum jtag_event event, void *priv)
\r
241 case JTAG_TRST_ASSERTED:
\r
243 case JTAG_TRST_RELEASED:
\r
245 case JTAG_SRST_ASSERTED:
\r
247 case JTAG_SRST_RELEASED:
\r
250 WARNING("unhandled JTAG event");
\r
256 int xscale_read_dcsr(target_t *target)
\r
258 armv4_5_common_t *armv4_5 = target->arch_info;
\r
259 xscale_common_t *xscale = armv4_5->arch_info;
\r
263 scan_field_t fields[3];
\r
265 u8 field0_check_value = 0x2;
\r
266 u8 field0_check_mask = 0x7;
\r
268 u8 field2_check_value = 0x0;
\r
269 u8 field2_check_mask = 0x1;
\r
271 jtag_add_end_state(TAP_PD);
\r
272 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
\r
274 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
\r
275 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
\r
277 fields[0].device = xscale->jtag_info.chain_pos;
\r
278 fields[0].num_bits = 3;
\r
279 fields[0].out_value = &field0;
\r
280 fields[0].out_mask = NULL;
\r
281 fields[0].in_value = NULL;
\r
282 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
\r
284 fields[1].device = xscale->jtag_info.chain_pos;
\r
285 fields[1].num_bits = 32;
\r
286 fields[1].out_value = NULL;
\r
287 fields[1].out_mask = NULL;
\r
288 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
\r
289 fields[1].in_handler = NULL;
\r
290 fields[1].in_handler_priv = NULL;
\r
291 fields[1].in_check_value = NULL;
\r
292 fields[1].in_check_mask = NULL;
\r
296 fields[2].device = xscale->jtag_info.chain_pos;
\r
297 fields[2].num_bits = 1;
\r
298 fields[2].out_value = &field2;
\r
299 fields[2].out_mask = NULL;
\r
300 fields[2].in_value = NULL;
\r
301 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
\r
303 jtag_add_dr_scan(3, fields, -1);
\r
305 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
307 ERROR("JTAG error while reading DCSR");
\r
311 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
\r
312 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
\r
314 /* write the register with the value we just read
\r
315 * on this second pass, only the first bit of field0 is guaranteed to be 0)
\r
317 field0_check_mask = 0x1;
\r
318 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
\r
319 fields[1].in_value = NULL;
\r
321 jtag_add_end_state(TAP_RTI);
\r
323 jtag_add_dr_scan(3, fields, -1);
\r
328 int xscale_receive(target_t *target, u32 *buffer, int num_words)
\r
330 int retval = ERROR_OK;
\r
331 armv4_5_common_t *armv4_5 = target->arch_info;
\r
332 xscale_common_t *xscale = armv4_5->arch_info;
\r
334 enum tap_state path[3];
\r
335 scan_field_t fields[3];
\r
337 u8 *field0 = malloc(num_words * 1);
\r
338 u8 field0_check_value = 0x2;
\r
339 u8 field0_check_mask = 0x6;
\r
340 u32 *field1 = malloc(num_words * 4);
\r
341 u8 field2_check_value = 0x0;
\r
342 u8 field2_check_mask = 0x1;
\r
343 int words_done = 0;
\r
344 int words_scheduled = 0;
\r
352 fields[0].device = xscale->jtag_info.chain_pos;
\r
353 fields[0].num_bits = 3;
\r
354 fields[0].out_value = NULL;
\r
355 fields[0].out_mask = NULL;
\r
356 /* fields[0].in_value = field0; */
\r
357 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
\r
359 fields[1].device = xscale->jtag_info.chain_pos;
\r
360 fields[1].num_bits = 32;
\r
361 fields[1].out_value = NULL;
\r
362 fields[1].out_mask = NULL;
\r
363 fields[1].in_value = NULL;
\r
364 fields[1].in_handler = NULL;
\r
365 fields[1].in_handler_priv = NULL;
\r
366 fields[1].in_check_value = NULL;
\r
367 fields[1].in_check_mask = NULL;
\r
371 fields[2].device = xscale->jtag_info.chain_pos;
\r
372 fields[2].num_bits = 1;
\r
373 fields[2].out_value = NULL;
\r
374 fields[2].out_mask = NULL;
\r
375 fields[2].in_value = NULL;
\r
376 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
\r
378 jtag_add_end_state(TAP_RTI);
\r
379 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
\r
380 jtag_add_runtest(1, -1);
\r
382 /* repeat until all words have been collected */
\r
384 while (words_done < num_words)
\r
386 /* schedule reads */
\r
387 words_scheduled = 0;
\r
388 for (i = words_done; i < num_words; i++)
\r
390 fields[0].in_value = &field0[i];
\r
391 fields[1].in_handler = buf_to_u32_handler;
\r
392 fields[1].in_handler_priv = (u8*)&field1[i];
\r
394 jtag_add_pathmove(3, path);
\r
395 jtag_add_dr_scan(3, fields, TAP_RTI);
\r
399 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
401 ERROR("JTAG error while receiving data from debug handler");
\r
405 /* examine results */
\r
406 for (i = words_done; i < num_words; i++)
\r
408 if (!(field0[0] & 1))
\r
410 /* move backwards if necessary */
\r
412 for (j = i; j < num_words - 1; j++)
\r
414 field0[j] = field0[j+1];
\r
415 field1[j] = field1[j+1];
\r
420 if (words_scheduled == 0)
\r
422 if (attempts++ == 1000)
\r
424 ERROR("Failed to receiving data from debug handler after 1000 attempts");
\r
425 retval = ERROR_JTAG_QUEUE_FAILED;
\r
430 words_done += words_scheduled;
\r
433 for (i = 0; i < num_words; i++)
\r
434 *(buffer++) = buf_get_u32((u8*)&field1[i], 0, 32);
\r
441 int xscale_read_tx(target_t *target, int consume)
\r
443 armv4_5_common_t *armv4_5 = target->arch_info;
\r
444 xscale_common_t *xscale = armv4_5->arch_info;
\r
445 enum tap_state path[3];
\r
446 enum tap_state noconsume_path[9];
\r
449 struct timeval timeout, now;
\r
451 scan_field_t fields[3];
\r
452 u8 field0_in = 0x0;
\r
453 u8 field0_check_value = 0x2;
\r
454 u8 field0_check_mask = 0x6;
\r
455 u8 field2_check_value = 0x0;
\r
456 u8 field2_check_mask = 0x1;
\r
458 jtag_add_end_state(TAP_RTI);
\r
460 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgtx);
\r
466 noconsume_path[0] = TAP_SDS;
\r
467 noconsume_path[1] = TAP_CD;
\r
468 noconsume_path[2] = TAP_E1D;
\r
469 noconsume_path[3] = TAP_PD;
\r
470 noconsume_path[4] = TAP_E2D;
\r
471 noconsume_path[5] = TAP_UD;
\r
472 noconsume_path[6] = TAP_SDS;
\r
473 noconsume_path[7] = TAP_CD;
\r
474 noconsume_path[8] = TAP_SD;
\r
476 fields[0].device = xscale->jtag_info.chain_pos;
\r
477 fields[0].num_bits = 3;
\r
478 fields[0].out_value = NULL;
\r
479 fields[0].out_mask = NULL;
\r
480 fields[0].in_value = &field0_in;
\r
481 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
\r
483 fields[1].device = xscale->jtag_info.chain_pos;
\r
484 fields[1].num_bits = 32;
\r
485 fields[1].out_value = NULL;
\r
486 fields[1].out_mask = NULL;
\r
487 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
\r
488 fields[1].in_handler = NULL;
\r
489 fields[1].in_handler_priv = NULL;
\r
490 fields[1].in_check_value = NULL;
\r
491 fields[1].in_check_mask = NULL;
\r
495 fields[2].device = xscale->jtag_info.chain_pos;
\r
496 fields[2].num_bits = 1;
\r
497 fields[2].out_value = NULL;
\r
498 fields[2].out_mask = NULL;
\r
499 fields[2].in_value = NULL;
\r
500 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
\r
502 gettimeofday(&timeout, NULL);
\r
503 timeval_add_time(&timeout, 5, 0);
\r
507 /* if we want to consume the register content (i.e. clear TX_READY),
\r
508 * we have to go straight from Capture-DR to Shift-DR
\r
509 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
\r
512 jtag_add_pathmove(3, path);
\r
514 jtag_add_pathmove(sizeof(noconsume_path)/sizeof(*noconsume_path), noconsume_path);
\r
516 jtag_add_dr_scan(3, fields, TAP_RTI);
\r
518 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
520 ERROR("JTAG error while reading TX");
\r
521 return ERROR_TARGET_TIMEOUT;
\r
524 gettimeofday(&now, NULL);
\r
525 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
\r
527 ERROR("time out reading TX register");
\r
528 return ERROR_TARGET_TIMEOUT;
\r
530 } while ((!(field0_in & 1)) && consume);
\r
532 if (!(field0_in & 1))
\r
533 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
\r
538 int xscale_write_rx(target_t *target)
\r
540 armv4_5_common_t *armv4_5 = target->arch_info;
\r
541 xscale_common_t *xscale = armv4_5->arch_info;
\r
544 struct timeval timeout, now;
\r
546 scan_field_t fields[3];
\r
547 u8 field0_out = 0x0;
\r
548 u8 field0_in = 0x0;
\r
549 u8 field0_check_value = 0x2;
\r
550 u8 field0_check_mask = 0x6;
\r
552 u8 field2_check_value = 0x0;
\r
553 u8 field2_check_mask = 0x1;
\r
555 jtag_add_end_state(TAP_RTI);
\r
557 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
\r
559 fields[0].device = xscale->jtag_info.chain_pos;
\r
560 fields[0].num_bits = 3;
\r
561 fields[0].out_value = &field0_out;
\r
562 fields[0].out_mask = NULL;
\r
563 fields[0].in_value = &field0_in;
\r
564 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
\r
566 fields[1].device = xscale->jtag_info.chain_pos;
\r
567 fields[1].num_bits = 32;
\r
568 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
\r
569 fields[1].out_mask = NULL;
\r
570 fields[1].in_value = NULL;
\r
571 fields[1].in_handler = NULL;
\r
572 fields[1].in_handler_priv = NULL;
\r
573 fields[1].in_check_value = NULL;
\r
574 fields[1].in_check_mask = NULL;
\r
578 fields[2].device = xscale->jtag_info.chain_pos;
\r
579 fields[2].num_bits = 1;
\r
580 fields[2].out_value = &field2;
\r
581 fields[2].out_mask = NULL;
\r
582 fields[2].in_value = NULL;
\r
583 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
\r
585 gettimeofday(&timeout, NULL);
\r
586 timeval_add_time(&timeout, 5, 0);
\r
588 /* poll until rx_read is low */
\r
589 DEBUG("polling RX");
\r
592 jtag_add_dr_scan(3, fields, TAP_RTI);
\r
594 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
596 ERROR("JTAG error while writing RX");
\r
600 gettimeofday(&now, NULL);
\r
601 if ((now.tv_sec > timeout.tv_sec) || ((now.tv_sec == timeout.tv_sec)&& (now.tv_usec > timeout.tv_usec)))
\r
603 ERROR("time out writing RX register");
\r
604 return ERROR_TARGET_TIMEOUT;
\r
606 } while (field0_in & 1);
\r
610 jtag_add_dr_scan(3, fields, TAP_RTI);
\r
612 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
614 ERROR("JTAG error while writing RX");
\r
621 /* send count elements of size byte to the debug handler */
\r
622 int xscale_send(target_t *target, u8 *buffer, int count, int size)
\r
624 armv4_5_common_t *armv4_5 = target->arch_info;
\r
625 xscale_common_t *xscale = armv4_5->arch_info;
\r
629 int done_count = 0;
\r
630 u8 output[4] = {0, 0, 0, 0};
\r
632 scan_field_t fields[3];
\r
633 u8 field0_out = 0x0;
\r
634 u8 field0_check_value = 0x2;
\r
635 u8 field0_check_mask = 0x6;
\r
637 u8 field2_check_value = 0x0;
\r
638 u8 field2_check_mask = 0x1;
\r
640 jtag_add_end_state(TAP_RTI);
\r
642 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dbgrx);
\r
644 fields[0].device = xscale->jtag_info.chain_pos;
\r
645 fields[0].num_bits = 3;
\r
646 fields[0].out_value = &field0_out;
\r
647 fields[0].out_mask = NULL;
\r
648 fields[0].in_handler = NULL;
\r
649 if (!xscale->fast_memory_access)
\r
651 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
\r
654 fields[1].device = xscale->jtag_info.chain_pos;
\r
655 fields[1].num_bits = 32;
\r
656 fields[1].out_value = output;
\r
657 fields[1].out_mask = NULL;
\r
658 fields[1].in_value = NULL;
\r
659 fields[1].in_handler = NULL;
\r
660 fields[1].in_handler_priv = NULL;
\r
661 fields[1].in_check_value = NULL;
\r
662 fields[1].in_check_mask = NULL;
\r
666 fields[2].device = xscale->jtag_info.chain_pos;
\r
667 fields[2].num_bits = 1;
\r
668 fields[2].out_value = &field2;
\r
669 fields[2].out_mask = NULL;
\r
670 fields[2].in_value = NULL;
\r
671 fields[2].in_handler = NULL;
\r
672 if (!xscale->fast_memory_access)
\r
674 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
\r
679 int endianness = target->endianness;
\r
680 while (done_count++ < count)
\r
682 if (endianness == TARGET_LITTLE_ENDIAN)
\r
684 output[0]=buffer[0];
\r
685 output[1]=buffer[1];
\r
686 output[2]=buffer[2];
\r
687 output[3]=buffer[3];
\r
690 output[0]=buffer[3];
\r
691 output[1]=buffer[2];
\r
692 output[2]=buffer[1];
\r
693 output[3]=buffer[0];
\r
695 jtag_add_dr_scan(3, fields, TAP_RTI);
\r
701 while (done_count++ < count)
\r
703 /* extract sized element from target-endian buffer, and put it
\r
704 * into little-endian output buffer
\r
709 buf_set_u32(output, 0, 32, target_buffer_get_u16(target, buffer));
\r
712 output[0] = *buffer;
\r
715 ERROR("BUG: size neither 4, 2 nor 1");
\r
719 jtag_add_dr_scan(3, fields, TAP_RTI);
\r
725 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
727 ERROR("JTAG error while sending data to debug handler");
\r
/* Send a 32-bit value to the XScale debug handler: the value is staged in
 * the cached RX register image, then xscale_write_rx() performs the actual
 * JTAG transfer; its status (e.g. a timeout) is returned to the caller.
 * NOTE(review): this chunk is a line-numbered listing with the original
 * brace/blank lines stripped by extraction (lines 735/738/741 absent);
 * the code lines below are reproduced byte-identically. */
734 int xscale_send_u32(target_t *target, u32 value)

/* resolve the arch_info chain: target -> armv4_5 common -> xscale common */
736 armv4_5_common_t *armv4_5 = target->arch_info;

737 xscale_common_t *xscale = armv4_5->arch_info;

/* stage the value into bits 0..31 of the register cache's RX image */
739 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);

/* xscale_write_rx() polls RX-ready and shifts the word via JTAG */
740 return xscale_write_rx(target);
\r
743 int xscale_write_dcsr(target_t *target, int hold_rst, int ext_dbg_brk)
\r
745 armv4_5_common_t *armv4_5 = target->arch_info;
\r
746 xscale_common_t *xscale = armv4_5->arch_info;
\r
750 scan_field_t fields[3];
\r
752 u8 field0_check_value = 0x2;
\r
753 u8 field0_check_mask = 0x7;
\r
755 u8 field2_check_value = 0x0;
\r
756 u8 field2_check_mask = 0x1;
\r
758 if (hold_rst != -1)
\r
759 xscale->hold_rst = hold_rst;
\r
761 if (ext_dbg_brk != -1)
\r
762 xscale->external_debug_break = ext_dbg_brk;
\r
764 jtag_add_end_state(TAP_RTI);
\r
765 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
\r
767 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
\r
768 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
\r
770 fields[0].device = xscale->jtag_info.chain_pos;
\r
771 fields[0].num_bits = 3;
\r
772 fields[0].out_value = &field0;
\r
773 fields[0].out_mask = NULL;
\r
774 fields[0].in_value = NULL;
\r
775 jtag_set_check_value(fields+0, &field0_check_value, &field0_check_mask, NULL);
\r
777 fields[1].device = xscale->jtag_info.chain_pos;
\r
778 fields[1].num_bits = 32;
\r
779 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
\r
780 fields[1].out_mask = NULL;
\r
781 fields[1].in_value = NULL;
\r
782 fields[1].in_handler = NULL;
\r
783 fields[1].in_handler_priv = NULL;
\r
784 fields[1].in_check_value = NULL;
\r
785 fields[1].in_check_mask = NULL;
\r
789 fields[2].device = xscale->jtag_info.chain_pos;
\r
790 fields[2].num_bits = 1;
\r
791 fields[2].out_value = &field2;
\r
792 fields[2].out_mask = NULL;
\r
793 fields[2].in_value = NULL;
\r
794 jtag_set_check_value(fields+2, &field2_check_value, &field2_check_mask, NULL);
\r
796 jtag_add_dr_scan(3, fields, -1);
\r
798 if ((retval = jtag_execute_queue()) != ERROR_OK)
\r
800 ERROR("JTAG error while writing DCSR");
\r
804 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
\r
805 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
\r
810 /* parity of the number of bits 0 if even; 1 if odd. for 32 bit words */
\r
811 unsigned int parity (unsigned int v)
\r
813 unsigned int ov = v;
\r
818 DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1);
\r
819 return (0x6996 >> v) & 1;
\r
822 int xscale_load_ic(target_t *target, int mini, u32 va, u32 buffer[8])
\r
824 armv4_5_common_t *armv4_5 = target->arch_info;
\r
825 xscale_common_t *xscale = armv4_5->arch_info;
\r
830 scan_field_t fields[2];
\r
832 DEBUG("loading miniIC at 0x%8.8x", va);
\r
834 jtag_add_end_state(TAP_RTI);
\r
835 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
\r
837 /* CMD is b010 for Main IC and b011 for Mini IC */
\r
839 buf_set_u32(&cmd, 0, 3, 0x3);
\r
841 buf_set_u32(&cmd, 0, 3, 0x2);
\r
843 buf_set_u32(&cmd, 3, 3, 0x0);
\r
845 /* virtual address of desired cache line */
\r
846 buf_set_u32(packet, 0, 27, va >> 5);
\r
848 fields[0].device = xscale->jtag_info.chain_pos;
\r
849 fields[0].num_bits = 6;
\r
850 fields[0].out_value = &cmd;
\r
851 fields[0].out_mask = NULL;
\r
852 fields[0].in_value = NULL;
\r
853 fields[0].in_check_value = NULL;
\r
854 fields[0].in_check_mask = NULL;
\r
855 fields[0].in_handler = NULL;
\r
856 fields[0].in_handler_priv = NULL;
\r
858 fields[1].device = xscale->jtag_info.chain_pos;
\r
859 fields[1].num_bits = 27;
\r
860 fields[1].out_value = packet;
\r
861 fields[1].out_mask = NULL;
\r
862 fields[1].in_value = NULL;
\r
863 fields[1].in_check_value = NULL;
\r
864 fields[1].in_check_mask = NULL;
\r
865 fields[1].in_handler = NULL;
\r
866 fields[1].in_handler_priv = NULL;
\r
868 jtag_add_dr_scan(2, fields, -1);
\r
870 fields[0].num_bits = 32;
\r
871 fields[0].out_value = packet;
\r
873 fields[1].num_bits = 1;
\r
874 fields[1].out_value = &cmd;
\r
876 for (word = 0; word < 8; word++)
\r
878 buf_set_u32(packet, 0, 32, buffer[word]);
\r
879 cmd = parity(*((u32*)packet));
\r
880 jtag_add_dr_scan(2, fields, -1);
\r
883 jtag_execute_queue();
\r
888 int xscale_invalidate_ic_line(target_t *target, u32 va)
\r
890 armv4_5_common_t *armv4_5 = target->arch_info;
\r
891 xscale_common_t *xscale = armv4_5->arch_info;
\r
895 scan_field_t fields[2];
\r
897 jtag_add_end_state(TAP_RTI);
\r
898 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.ldic); /* LDIC */
\r
900 /* CMD for invalidate IC line b000, bits [6:4] b000 */
\r
901 buf_set_u32(&cmd, 0, 6, 0x0);
\r
903 /* virtual address of desired cache line */
\r
904 buf_set_u32(packet, 0, 27, va >> 5);
\r
906 fields[0].device = xscale->jtag_info.chain_pos;
\r
907 fields[0].num_bits = 6;
\r
908 fields[0].out_value = &cmd;
\r
909 fields[0].out_mask = NULL;
\r
910 fields[0].in_value = NULL;
\r
911 fields[0].in_check_value = NULL;
\r
912 fields[0].in_check_mask = NULL;
\r
913 fields[0].in_handler = NULL;
\r
914 fields[0].in_handler_priv = NULL;
\r
916 fields[1].device = xscale->jtag_info.chain_pos;
\r
917 fields[1].num_bits = 27;
\r
918 fields[1].out_value = packet;
\r
919 fields[1].out_mask = NULL;
\r
920 fields[1].in_value = NULL;
\r
921 fields[1].in_check_value = NULL;
\r
922 fields[1].in_check_mask = NULL;
\r
923 fields[1].in_handler = NULL;
\r
924 fields[1].in_handler_priv = NULL;
\r
926 jtag_add_dr_scan(2, fields, -1);
\r
931 int xscale_update_vectors(target_t *target)
\r
933 armv4_5_common_t *armv4_5 = target->arch_info;
\r
934 xscale_common_t *xscale = armv4_5->arch_info;
\r
937 u32 low_reset_branch, high_reset_branch;
\r
939 for (i = 1; i < 8; i++)
\r
941 /* if there's a static vector specified for this exception, override */
\r
942 if (xscale->static_high_vectors_set & (1 << i))
\r
944 xscale->high_vectors[i] = xscale->static_high_vectors[i];
\r
948 if (target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]) != ERROR_OK)
\r
950 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
\r
955 for (i = 1; i < 8; i++)
\r
957 if (xscale->static_low_vectors_set & (1 << i))
\r
959 xscale->low_vectors[i] = xscale->static_low_vectors[i];
\r
963 if (target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]) != ERROR_OK)
\r
965 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
\r
970 /* calculate branches to debug handler */
\r
971 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
\r
972 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
\r
974 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
\r
975 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
\r
977 /* invalidate and load exception vectors in mini i-cache */
\r
978 xscale_invalidate_ic_line(target, 0x0);
\r
979 xscale_invalidate_ic_line(target, 0xffff0000);
\r
981 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
\r
982 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
\r
987 int xscale_arch_state(struct target_s *target)
\r
989 armv4_5_common_t *armv4_5 = target->arch_info;
\r
990 xscale_common_t *xscale = armv4_5->arch_info;
\r
994 "disabled", "enabled"
\r
997 char *arch_dbg_reason[] =
\r
999 "", "\n(processor reset)", "\n(trace buffer full)"
\r
1002 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
\r
1004 ERROR("BUG: called for a non-ARMv4/5 target");
\r
1008 USER("target halted in %s state due to %s, current mode: %s\n"
\r
1009 "cpsr: 0x%8.8x pc: 0x%8.8x\n"
\r
1010 "MMU: %s, D-Cache: %s, I-Cache: %s"
\r
1012 armv4_5_state_strings[armv4_5->core_state],
\r
1013 target_debug_reason_strings[target->debug_reason],
\r
1014 armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)],
\r
1015 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
\r
1016 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32),
\r
1017 state[xscale->armv4_5_mmu.mmu_enabled],
\r
1018 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
\r
1019 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
\r
1020 arch_dbg_reason[xscale->arch_debug_reason]);
\r
1025 int xscale_poll(target_t *target)
\r
1027 int retval=ERROR_OK;
\r
1028 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1029 xscale_common_t *xscale = armv4_5->arch_info;
\r
1031 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING))
\r
1033 enum target_state previous_state = target->state;
\r
1034 if ((retval = xscale_read_tx(target, 0)) == ERROR_OK)
\r
1037 /* there's data to read from the tx register, we entered debug state */
\r
1038 xscale->handler_running = 1;
\r
1040 target->state = TARGET_HALTED;
\r
1042 /* process debug entry, fetching current mode regs */
\r
1043 retval = xscale_debug_entry(target);
\r
1045 else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
\r
1047 USER("error while polling TX register, reset CPU");
\r
1048 /* here we "lie" so GDB won't get stuck and a reset can be perfomed */
\r
1049 target->state = TARGET_HALTED;
\r
1052 /* debug_entry could have overwritten target state (i.e. immediate resume)
\r
1053 * don't signal event handlers in that case
\r
1055 if (target->state != TARGET_HALTED)
\r
1058 /* if target was running, signal that we halted
\r
1059 * otherwise we reentered from debug execution */
\r
1060 if (previous_state == TARGET_RUNNING)
\r
1061 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
\r
1063 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
\r
1068 int xscale_debug_entry(target_t *target)
\r
1070 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1071 xscale_common_t *xscale = armv4_5->arch_info;
\r
1078 /* clear external dbg break (will be written on next DCSR read) */
\r
1079 xscale->external_debug_break = 0;
\r
1080 xscale_read_dcsr(target);
\r
1082 /* get r0, pc, r1 to r7 and cpsr */
\r
1083 xscale_receive(target, buffer, 10);
\r
1085 /* move r0 from buffer to register cache */
\r
1086 buf_set_u32(armv4_5->core_cache->reg_list[0].value, 0, 32, buffer[0]);
\r
1087 armv4_5->core_cache->reg_list[15].dirty = 1;
\r
1088 armv4_5->core_cache->reg_list[15].valid = 1;
\r
1089 DEBUG("r0: 0x%8.8x", buffer[0]);
\r
1091 /* move pc from buffer to register cache */
\r
1092 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, buffer[1]);
\r
1093 armv4_5->core_cache->reg_list[15].dirty = 1;
\r
1094 armv4_5->core_cache->reg_list[15].valid = 1;
\r
1095 DEBUG("pc: 0x%8.8x", buffer[1]);
\r
1097 /* move data from buffer to register cache */
\r
1098 for (i = 1; i <= 7; i++)
\r
1100 buf_set_u32(armv4_5->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
\r
1101 armv4_5->core_cache->reg_list[i].dirty = 1;
\r
1102 armv4_5->core_cache->reg_list[i].valid = 1;
\r
1103 DEBUG("r%i: 0x%8.8x", i, buffer[i + 1]);
\r
1106 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, buffer[9]);
\r
1107 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
\r
1108 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
\r
1109 DEBUG("cpsr: 0x%8.8x", buffer[9]);
\r
1111 armv4_5->core_mode = buffer[9] & 0x1f;
\r
1112 if (armv4_5_mode_to_number(armv4_5->core_mode) == -1)
\r
1114 target->state = TARGET_UNKNOWN;
\r
1115 ERROR("cpsr contains invalid mode value - communication failure");
\r
1116 return ERROR_TARGET_FAILURE;
\r
1118 DEBUG("target entered debug state in %s mode", armv4_5_mode_strings[armv4_5_mode_to_number(armv4_5->core_mode)]);
\r
1120 if (buffer[9] & 0x20)
\r
1121 armv4_5->core_state = ARMV4_5_STATE_THUMB;
\r
1123 armv4_5->core_state = ARMV4_5_STATE_ARM;
\r
1125 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
\r
1126 if ((armv4_5->core_mode != ARMV4_5_MODE_USR) && (armv4_5->core_mode != ARMV4_5_MODE_SYS))
\r
1128 xscale_receive(target, buffer, 8);
\r
1129 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
\r
1130 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
\r
1131 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
\r
1135 /* r8 to r14, but no spsr */
\r
1136 xscale_receive(target, buffer, 7);
\r
1139 /* move data from buffer to register cache */
\r
1140 for (i = 8; i <= 14; i++)
\r
1142 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).value, 0, 32, buffer[i - 8]);
\r
1143 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).dirty = 0;
\r
1144 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i).valid = 1;
\r
1147 /* examine debug reason */
\r
1148 xscale_read_dcsr(target);
\r
1149 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
\r
1151 /* stored PC (for calculating fixup) */
\r
1152 pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
\r
1156 case 0x0: /* Processor reset */
\r
1157 target->debug_reason = DBG_REASON_DBGRQ;
\r
1158 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
\r
1161 case 0x1: /* Instruction breakpoint hit */
\r
1162 target->debug_reason = DBG_REASON_BREAKPOINT;
\r
1163 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
\r
1166 case 0x2: /* Data breakpoint hit */
\r
1167 target->debug_reason = DBG_REASON_WATCHPOINT;
\r
1168 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
\r
1171 case 0x3: /* BKPT instruction executed */
\r
1172 target->debug_reason = DBG_REASON_BREAKPOINT;
\r
1173 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
\r
1176 case 0x4: /* Ext. debug event */
\r
1177 target->debug_reason = DBG_REASON_DBGRQ;
\r
1178 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
\r
1181 case 0x5: /* Vector trap occured */
\r
1182 target->debug_reason = DBG_REASON_BREAKPOINT;
\r
1183 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
\r
1186 case 0x6: /* Trace buffer full break */
\r
1187 target->debug_reason = DBG_REASON_DBGRQ;
\r
1188 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
\r
1191 case 0x7: /* Reserved */
\r
1193 ERROR("Method of Entry is 'Reserved'");
\r
1198 /* apply PC fixup */
\r
1199 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, pc);
\r
1201 /* on the first debug entry, identify cache type */
\r
1202 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1)
\r
1204 u32 cache_type_reg;
\r
1206 /* read cp15 cache type register */
\r
1207 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
\r
1208 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value, 0, 32);
\r
1210 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
\r
1213 /* examine MMU and Cache settings */
\r
1214 /* read cp15 control register */
\r
1215 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
\r
1216 xscale->cp15_control_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
\r
1217 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
\r
1218 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
\r
1219 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
\r
1221 /* tracing enabled, read collected trace data */
\r
1222 if (xscale->trace.buffer_enabled)
\r
1224 xscale_read_trace(target);
\r
1225 xscale->trace.buffer_fill--;
\r
1227 /* resume if we're still collecting trace data */
\r
1228 if ((xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL)
\r
1229 && (xscale->trace.buffer_fill > 0))
\r
1231 xscale_resume(target, 1, 0x0, 1, 0);
\r
1235 xscale->trace.buffer_enabled = 0;
\r
1242 int xscale_halt(target_t *target)
\r
1244 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1245 xscale_common_t *xscale = armv4_5->arch_info;
\r
1247 DEBUG("target->state: %s", target_state_strings[target->state]);
\r
1249 if (target->state == TARGET_HALTED)
\r
1251 WARNING("target was already halted");
\r
1252 return ERROR_TARGET_ALREADY_HALTED;
\r
1254 else if (target->state == TARGET_UNKNOWN)
\r
1256 /* this must not happen for a xscale target */
\r
1257 ERROR("target was in unknown state when halt was requested");
\r
1258 return ERROR_TARGET_INVALID;
\r
1260 else if (target->state == TARGET_RESET)
\r
1262 DEBUG("target->state == TARGET_RESET");
\r
1266 /* assert external dbg break */
\r
1267 xscale->external_debug_break = 1;
\r
1268 xscale_read_dcsr(target);
\r
1270 target->debug_reason = DBG_REASON_DBGRQ;
\r
1276 int xscale_enable_single_step(struct target_s *target, u32 next_pc)
\r
1278 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1279 xscale_common_t *xscale= armv4_5->arch_info;
\r
1280 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
\r
1282 if (xscale->ibcr0_used)
\r
1284 breakpoint_t *ibcr0_bp = breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
\r
1288 xscale_unset_breakpoint(target, ibcr0_bp);
\r
1292 ERROR("BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
\r
1297 xscale_set_reg_u32(ibcr0, next_pc | 0x1);
\r
1302 int xscale_disable_single_step(struct target_s *target)
\r
1304 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1305 xscale_common_t *xscale= armv4_5->arch_info;
\r
1306 reg_t *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
\r
1308 xscale_set_reg_u32(ibcr0, 0x0);
\r
1313 int xscale_resume(struct target_s *target, int current, u32 address, int handle_breakpoints, int debug_execution)
\r
1315 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1316 xscale_common_t *xscale= armv4_5->arch_info;
\r
1317 breakpoint_t *breakpoint = target->breakpoints;
\r
1326 if (target->state != TARGET_HALTED)
\r
1328 WARNING("target not halted");
\r
1329 return ERROR_TARGET_NOT_HALTED;
\r
1332 if (!debug_execution)
\r
1334 target_free_all_working_areas(target);
\r
1337 /* update vector tables */
\r
1338 xscale_update_vectors(target);
\r
1340 /* current = 1: continue on current pc, otherwise continue at <address> */
\r
1342 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
\r
1344 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
\r
1346 /* if we're at the reset vector, we have to simulate the branch */
\r
1347 if (current_pc == 0x0)
\r
1349 arm_simulate_step(target, NULL);
\r
1350 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
\r
1353 /* the front-end may request us not to handle breakpoints */
\r
1354 if (handle_breakpoints)
\r
1356 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
\r
1360 /* there's a breakpoint at the current PC, we have to step over it */
\r
1361 DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
\r
1362 xscale_unset_breakpoint(target, breakpoint);
\r
1364 /* calculate PC of next instruction */
\r
1365 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
\r
1367 u32 current_opcode;
\r
1368 target_read_u32(target, current_pc, ¤t_opcode);
\r
1369 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
\r
1372 DEBUG("enable single-step");
\r
1373 xscale_enable_single_step(target, next_pc);
\r
1375 /* restore banked registers */
\r
1376 xscale_restore_context(target);
\r
1378 /* send resume request (command 0x30 or 0x31)
\r
1379 * clean the trace buffer if it is to be enabled (0x62) */
\r
1380 if (xscale->trace.buffer_enabled)
\r
1382 xscale_send_u32(target, 0x62);
\r
1383 xscale_send_u32(target, 0x31);
\r
1386 xscale_send_u32(target, 0x30);
\r
1389 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
\r
1390 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
\r
1392 for (i = 7; i >= 0; i--)
\r
1394 /* send register */
\r
1395 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
\r
1396 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
\r
1400 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
\r
1401 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
\r
1403 /* wait for and process debug entry */
\r
1404 xscale_debug_entry(target);
\r
1406 DEBUG("disable single-step");
\r
1407 xscale_disable_single_step(target);
\r
1409 DEBUG("set breakpoint at 0x%8.8x", breakpoint->address);
\r
1410 xscale_set_breakpoint(target, breakpoint);
\r
1414 /* enable any pending breakpoints and watchpoints */
\r
1415 xscale_enable_breakpoints(target);
\r
1416 xscale_enable_watchpoints(target);
\r
1418 /* restore banked registers */
\r
1419 xscale_restore_context(target);
\r
1421 /* send resume request (command 0x30 or 0x31)
\r
1422 * clean the trace buffer if it is to be enabled (0x62) */
\r
1423 if (xscale->trace.buffer_enabled)
\r
1425 xscale_send_u32(target, 0x62);
\r
1426 xscale_send_u32(target, 0x31);
\r
1429 xscale_send_u32(target, 0x30);
\r
1432 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
\r
1433 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
\r
1435 for (i = 7; i >= 0; i--)
\r
1437 /* send register */
\r
1438 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
\r
1439 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
\r
1443 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
\r
1444 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
\r
1446 target->debug_reason = DBG_REASON_NOTHALTED;
\r
1448 if (!debug_execution)
\r
1450 /* registers are now invalid */
\r
1451 armv4_5_invalidate_core_regs(target);
\r
1452 target->state = TARGET_RUNNING;
\r
1453 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
\r
1457 target->state = TARGET_DEBUG_RUNNING;
\r
1458 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
\r
1461 DEBUG("target resumed");
\r
1463 xscale->handler_running = 1;
\r
1468 int xscale_step(struct target_s *target, int current, u32 address, int handle_breakpoints)
\r
1470 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1471 xscale_common_t *xscale = armv4_5->arch_info;
\r
1472 breakpoint_t *breakpoint = target->breakpoints;
\r
1474 u32 current_pc, next_pc;
\r
1478 if (target->state != TARGET_HALTED)
\r
1480 WARNING("target not halted");
\r
1481 return ERROR_TARGET_NOT_HALTED;
\r
1484 /* current = 1: continue on current pc, otherwise continue at <address> */
\r
1486 buf_set_u32(armv4_5->core_cache->reg_list[15].value, 0, 32, address);
\r
1488 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
\r
1490 /* if we're at the reset vector, we have to simulate the step */
\r
1491 if (current_pc == 0x0)
\r
1493 arm_simulate_step(target, NULL);
\r
1494 current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);
\r
1496 target->debug_reason = DBG_REASON_SINGLESTEP;
\r
1497 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
\r
1502 /* the front-end may request us not to handle breakpoints */
\r
1503 if (handle_breakpoints)
\r
1504 if ((breakpoint = breakpoint_find(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32))))
\r
1506 xscale_unset_breakpoint(target, breakpoint);
\r
1509 target->debug_reason = DBG_REASON_SINGLESTEP;
\r
1511 /* calculate PC of next instruction */
\r
1512 if ((retval = arm_simulate_step(target, &next_pc)) != ERROR_OK)
\r
1514 u32 current_opcode;
\r
1515 target_read_u32(target, current_pc, ¤t_opcode);
\r
1516 ERROR("BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8x", current_opcode);
\r
1519 DEBUG("enable single-step");
\r
1520 xscale_enable_single_step(target, next_pc);
\r
1522 /* restore banked registers */
\r
1523 xscale_restore_context(target);
\r
1525 /* send resume request (command 0x30 or 0x31)
\r
1526 * clean the trace buffer if it is to be enabled (0x62) */
\r
1527 if (xscale->trace.buffer_enabled)
\r
1529 xscale_send_u32(target, 0x62);
\r
1530 xscale_send_u32(target, 0x31);
\r
1533 xscale_send_u32(target, 0x30);
\r
1536 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
\r
1537 DEBUG("writing cpsr with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32));
\r
1539 for (i = 7; i >= 0; i--)
\r
1541 /* send register */
\r
1542 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
\r
1543 DEBUG("writing r%i with value 0x%8.8x", i, buf_get_u32(armv4_5->core_cache->reg_list[i].value, 0, 32));
\r
1547 xscale_send_u32(target, buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
\r
1548 DEBUG("writing PC with value 0x%8.8x", buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
\r
1550 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
\r
1552 /* registers are now invalid */
\r
1553 armv4_5_invalidate_core_regs(target);
\r
1555 /* wait for and process debug entry */
\r
1556 xscale_debug_entry(target);
\r
1558 DEBUG("disable single-step");
\r
1559 xscale_disable_single_step(target);
\r
1561 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
\r
1565 xscale_set_breakpoint(target, breakpoint);
\r
1568 DEBUG("target stepped");
\r
1574 int xscale_assert_reset(target_t *target)
\r
1576 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1577 xscale_common_t *xscale = armv4_5->arch_info;
\r
1579 DEBUG("target->state: %s", target_state_strings[target->state]);
\r
1581 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
\r
1582 * end up in T-L-R, which would reset JTAG
\r
1584 jtag_add_end_state(TAP_RTI);
\r
1585 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, xscale->jtag_info.dcsr);
\r
1587 /* set Hold reset, Halt mode and Trap Reset */
\r
1588 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
\r
1589 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
\r
1590 xscale_write_dcsr(target, 1, 0);
\r
1592 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
\r
1593 xscale_jtag_set_instr(xscale->jtag_info.chain_pos, 0x7f);
\r
1594 jtag_execute_queue();
\r
1596 /* assert reset */
\r
1597 jtag_add_reset(0, 1);
\r
1599 /* sleep 1ms, to be sure we fulfill any requirements */
\r
1600 jtag_add_sleep(1000);
\r
1601 jtag_execute_queue();
\r
1603 target->state = TARGET_RESET;
\r
1608 int xscale_deassert_reset(target_t *target)
\r
1610 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1611 xscale_common_t *xscale = armv4_5->arch_info;
\r
1613 fileio_t debug_handler;
\r
1621 breakpoint_t *breakpoint = target->breakpoints;
\r
1625 xscale->ibcr_available = 2;
\r
1626 xscale->ibcr0_used = 0;
\r
1627 xscale->ibcr1_used = 0;
\r
1629 xscale->dbr_available = 2;
\r
1630 xscale->dbr0_used = 0;
\r
1631 xscale->dbr1_used = 0;
\r
1633 /* mark all hardware breakpoints as unset */
\r
1634 while (breakpoint)
\r
1636 if (breakpoint->type == BKPT_HARD)
\r
1638 breakpoint->set = 0;
\r
1640 breakpoint = breakpoint->next;
\r
1643 if (!xscale->handler_installed)
\r
1645 /* release SRST */
\r
1646 jtag_add_reset(0, 0);
\r
1648 /* wait 300ms; 150 and 100ms were not enough */
\r
1649 jtag_add_sleep(300*1000);
\r
1651 jtag_add_runtest(2030, TAP_RTI);
\r
1652 jtag_execute_queue();
\r
1654 /* set Hold reset, Halt mode and Trap Reset */
\r
1655 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
\r
1656 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
\r
1657 xscale_write_dcsr(target, 1, 0);
\r
1659 /* Load debug handler */
\r
1660 if (fileio_open(&debug_handler, PKGLIBDIR "/xscale/debug_handler.bin", FILEIO_READ, FILEIO_BINARY) != ERROR_OK)
\r
1662 ERROR("file open error: %s", debug_handler.error_str);
\r
1666 if ((binary_size = debug_handler.size) % 4)
\r
1668 ERROR("debug_handler.bin: size not a multiple of 4");
\r
1672 if (binary_size > 0x800)
\r
1674 ERROR("debug_handler.bin: larger than 2kb");
\r
1678 binary_size = CEIL(binary_size, 32) * 32;
\r
1680 address = xscale->handler_address;
\r
1681 while (binary_size > 0)
\r
1683 u32 cache_line[8];
\r
1686 if ((retval = fileio_read(&debug_handler, 32, buffer, &buf_cnt)) != ERROR_OK)
\r
1688 ERROR("reading debug handler failed: %s", debug_handler.error_str);
\r
1691 for (i = 0; i < buf_cnt; i += 4)
\r
1693 /* convert LE buffer to host-endian u32 */
\r
1694 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
\r
1697 for (; buf_cnt < 32; buf_cnt += 4)
\r
1699 cache_line[buf_cnt / 4] = 0xe1a08008;
\r
1702 /* only load addresses other than the reset vectors */
\r
1703 if ((address % 0x400) != 0x0)
\r
1705 xscale_load_ic(target, 1, address, cache_line);
\r
1708 address += buf_cnt;
\r
1709 binary_size -= buf_cnt;
\r
1712 xscale_load_ic(target, 1, 0x0, xscale->low_vectors);
\r
1713 xscale_load_ic(target, 1, 0xffff0000, xscale->high_vectors);
\r
1715 jtag_add_runtest(30, TAP_RTI);
\r
1717 jtag_add_sleep(100000);
\r
1719 /* set Hold reset, Halt mode and Trap Reset */
\r
1720 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
\r
1721 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
\r
1722 xscale_write_dcsr(target, 1, 0);
\r
1724 /* clear Hold reset to let the target run (should enter debug handler) */
\r
1725 xscale_write_dcsr(target, 0, 1);
\r
1726 target->state = TARGET_RUNNING;
\r
1728 if ((target->reset_mode != RESET_HALT) && (target->reset_mode != RESET_INIT))
\r
1730 jtag_add_sleep(10000);
\r
1732 /* we should have entered debug now */
\r
1733 xscale_debug_entry(target);
\r
1734 target->state = TARGET_HALTED;
\r
1736 /* resume the target */
\r
1737 xscale_resume(target, 1, 0x0, 1, 0);
\r
1740 fileio_close(&debug_handler);
\r
1744 jtag_add_reset(0, 0);
\r
1751 int xscale_soft_reset_halt(struct target_s *target)
\r
1757 int xscale_prepare_reset_halt(struct target_s *target)
\r
1759 /* nothing to be done for reset_halt on XScale targets
\r
1760 * we always halt after a reset to upload the debug handler
\r
1765 int xscale_read_core_reg(struct target_s *target, int num, enum armv4_5_mode mode)
\r
1771 int xscale_write_core_reg(struct target_s *target, int num, enum armv4_5_mode mode, u32 value)
\r
1777 int xscale_full_context(target_t *target)
\r
1779 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1787 if (target->state != TARGET_HALTED)
\r
1789 WARNING("target not halted");
\r
1790 return ERROR_TARGET_NOT_HALTED;
\r
1793 buffer = malloc(4 * 8);
\r
1795 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
\r
1796 * we can't enter User mode on an XScale (unpredictable),
\r
1797 * but User shares registers with SYS
\r
1799 for(i = 1; i < 7; i++)
\r
1803 /* check if there are invalid registers in the current mode
\r
1805 for (j = 0; j <= 16; j++)
\r
1807 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid == 0)
\r
1815 /* request banked registers */
\r
1816 xscale_send_u32(target, 0x0);
\r
1819 tmp_cpsr |= armv4_5_number_to_mode(i);
\r
1820 tmp_cpsr |= 0xc0; /* I/F bits */
\r
1822 /* send CPSR for desired mode */
\r
1823 xscale_send_u32(target, tmp_cpsr);
\r
1825 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
\r
1826 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
\r
1828 xscale_receive(target, buffer, 8);
\r
1829 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32, buffer[7]);
\r
1830 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
\r
1831 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).valid = 1;
\r
1835 xscale_receive(target, buffer, 7);
\r
1838 /* move data from buffer to register cache */
\r
1839 for (j = 8; j <= 14; j++)
\r
1841 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).value, 0, 32, buffer[j - 8]);
\r
1842 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
\r
1843 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).valid = 1;
\r
1853 int xscale_restore_context(target_t *target)
\r
1855 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1861 if (target->state != TARGET_HALTED)
\r
1863 WARNING("target not halted");
\r
1864 return ERROR_TARGET_NOT_HALTED;
\r
1867 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
\r
1868 * we can't enter User mode on an XScale (unpredictable),
\r
1869 * but User shares registers with SYS
\r
1871 for(i = 1; i < 7; i++)
\r
1875 /* check if there are invalid registers in the current mode
\r
1877 for (j = 8; j <= 14; j++)
\r
1879 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty == 1)
\r
1883 /* if not USR/SYS, check if the SPSR needs to be written */
\r
1884 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
\r
1886 if (ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty == 1)
\r
1894 /* send banked registers */
\r
1895 xscale_send_u32(target, 0x1);
\r
1898 tmp_cpsr |= armv4_5_number_to_mode(i);
\r
1899 tmp_cpsr |= 0xc0; /* I/F bits */
\r
1901 /* send CPSR for desired mode */
\r
1902 xscale_send_u32(target, tmp_cpsr);
\r
1904 /* send banked registers, r8 to r14, and spsr if not in USR/SYS mode */
\r
1905 for (j = 8; j <= 14; j++)
\r
1907 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, j).value, 0, 32));
\r
1908 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), j).dirty = 0;
\r
1911 if ((armv4_5_number_to_mode(i) != ARMV4_5_MODE_USR) && (armv4_5_number_to_mode(i) != ARMV4_5_MODE_SYS))
\r
1913 xscale_send_u32(target, buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).value, 0, 32));
\r
1914 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_number_to_mode(i), 16).dirty = 0;
\r
1922 int xscale_read_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
\r
1924 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1925 xscale_common_t *xscale = armv4_5->arch_info;
\r
1929 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
\r
1931 if (target->state != TARGET_HALTED)
\r
1933 WARNING("target not halted");
\r
1934 return ERROR_TARGET_NOT_HALTED;
\r
1937 /* sanitize arguments */
\r
1938 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
\r
1939 return ERROR_INVALID_ARGUMENTS;
\r
1941 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
\r
1942 return ERROR_TARGET_UNALIGNED_ACCESS;
\r
1944 /* send memory read request (command 0x1n, n: access size) */
\r
1945 xscale_send_u32(target, 0x10 | size);
\r
1947 /* send base address for read request */
\r
1948 xscale_send_u32(target, address);
\r
1950 /* send number of requested data words */
\r
1951 xscale_send_u32(target, count);
\r
1953 /* receive data from target (count times 32-bit words in host endianness) */
\r
1954 buf32 = malloc(4 * count);
\r
1955 xscale_receive(target, buf32, count);
\r
1957 /* extract data from host-endian buffer into byte stream */
\r
1958 for (i = 0; i < count; i++)
\r
1963 target_buffer_set_u32(target, buffer, buf32[i]);
\r
1967 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
\r
1971 *buffer++ = buf32[i] & 0xff;
\r
1974 ERROR("should never get here");
\r
1981 /* examine DCSR, to see if Sticky Abort (SA) got set */
\r
1982 xscale_read_dcsr(target);
\r
1983 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
\r
1985 /* clear SA bit */
\r
1986 xscale_send_u32(target, 0x60);
\r
1988 return ERROR_TARGET_DATA_ABORT;
\r
1994 int xscale_write_memory(struct target_s *target, u32 address, u32 size, u32 count, u8 *buffer)
\r
1996 armv4_5_common_t *armv4_5 = target->arch_info;
\r
1997 xscale_common_t *xscale = armv4_5->arch_info;
\r
1999 DEBUG("address: 0x%8.8x, size: 0x%8.8x, count: 0x%8.8x", address, size, count);
\r
2001 if (target->state != TARGET_HALTED)
\r
2003 WARNING("target not halted");
\r
2004 return ERROR_TARGET_NOT_HALTED;
\r
2007 /* sanitize arguments */
\r
2008 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
\r
2009 return ERROR_INVALID_ARGUMENTS;
\r
2011 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
\r
2012 return ERROR_TARGET_UNALIGNED_ACCESS;
\r
2014 /* send memory write request (command 0x2n, n: access size) */
\r
2015 xscale_send_u32(target, 0x20 | size);
\r
2017 /* send base address for read request */
\r
2018 xscale_send_u32(target, address);
\r
2020 /* send number of requested data words to be written*/
\r
2021 xscale_send_u32(target, count);
\r
2023 /* extract data from host-endian buffer into byte stream */
\r
2025 for (i = 0; i < count; i++)
\r
2030 value = target_buffer_get_u32(target, buffer);
\r
2031 xscale_send_u32(target, value);
\r
2035 value = target_buffer_get_u16(target, buffer);
\r
2036 xscale_send_u32(target, value);
\r
2041 xscale_send_u32(target, value);
\r
2045 ERROR("should never get here");
\r
2050 xscale_send(target, buffer, count, size);
\r
2052 /* examine DCSR, to see if Sticky Abort (SA) got set */
\r
2053 xscale_read_dcsr(target);
\r
2054 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1)
\r
2056 /* clear SA bit */
\r
2057 xscale_send_u32(target, 0x60);
\r
2059 return ERROR_TARGET_DATA_ABORT;
\r
2065 int xscale_bulk_write_memory(target_t *target, u32 address, u32 count, u8 *buffer)
\r
2067 return xscale_write_memory(target, address, 4, count, buffer);
\r
2070 int xscale_checksum_memory(struct target_s *target, u32 address, u32 count, u32* checksum)
\r
2072 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
\r
2075 u32 xscale_get_ttb(target_t *target)
\r
2077 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2078 xscale_common_t *xscale = armv4_5->arch_info;
\r
2081 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
\r
2082 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
\r
2087 void xscale_disable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
\r
2089 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2090 xscale_common_t *xscale = armv4_5->arch_info;
\r
2093 /* read cp15 control register */
\r
2094 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
\r
2095 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
\r
2098 cp15_control &= ~0x1U;
\r
2102 /* clean DCache */
\r
2103 xscale_send_u32(target, 0x50);
\r
2104 xscale_send_u32(target, xscale->cache_clean_address);
\r
2106 /* invalidate DCache */
\r
2107 xscale_send_u32(target, 0x51);
\r
2109 cp15_control &= ~0x4U;
\r
2114 /* invalidate ICache */
\r
2115 xscale_send_u32(target, 0x52);
\r
2116 cp15_control &= ~0x1000U;
\r
2119 /* write new cp15 control register */
\r
2120 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
\r
2122 /* execute cpwait to ensure outstanding operations complete */
\r
2123 xscale_send_u32(target, 0x53);
\r
2126 void xscale_enable_mmu_caches(target_t *target, int mmu, int d_u_cache, int i_cache)
\r
2128 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2129 xscale_common_t *xscale = armv4_5->arch_info;
\r
2132 /* read cp15 control register */
\r
2133 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
\r
2134 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
\r
2137 cp15_control |= 0x1U;
\r
2140 cp15_control |= 0x4U;
\r
2143 cp15_control |= 0x1000U;
\r
2145 /* write new cp15 control register */
\r
2146 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
\r
2148 /* execute cpwait to ensure outstanding operations complete */
\r
2149 xscale_send_u32(target, 0x53);
\r
2152 int xscale_set_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
\r
2154 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2155 xscale_common_t *xscale = armv4_5->arch_info;
\r
2157 if (target->state != TARGET_HALTED)
\r
2159 WARNING("target not halted");
\r
2160 return ERROR_TARGET_NOT_HALTED;
\r
2163 if (xscale->force_hw_bkpts)
\r
2164 breakpoint->type = BKPT_HARD;
\r
2166 if (breakpoint->set)
\r
2168 WARNING("breakpoint already set");
\r
2172 if (breakpoint->type == BKPT_HARD)
\r
2174 u32 value = breakpoint->address | 1;
\r
2175 if (!xscale->ibcr0_used)
\r
2177 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
\r
2178 xscale->ibcr0_used = 1;
\r
2179 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
\r
2181 else if (!xscale->ibcr1_used)
\r
2183 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
\r
2184 xscale->ibcr1_used = 1;
\r
2185 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
\r
2189 ERROR("BUG: no hardware comparator available");
\r
2193 else if (breakpoint->type == BKPT_SOFT)
\r
2195 if (breakpoint->length == 4)
\r
2197 /* keep the original instruction in target endianness */
\r
2198 target->type->read_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
\r
2199 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
\r
2200 target_write_u32(target, breakpoint->address, xscale->arm_bkpt);
\r
2204 /* keep the original instruction in target endianness */
\r
2205 target->type->read_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
\r
2206 /* write the original instruction in target endianness (arm7_9->arm_bkpt is host endian) */
\r
2207 target_write_u32(target, breakpoint->address, xscale->thumb_bkpt);
\r
2209 breakpoint->set = 1;
\r
2216 int xscale_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
\r
2218 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2219 xscale_common_t *xscale = armv4_5->arch_info;
\r
2221 if (target->state != TARGET_HALTED)
\r
2223 WARNING("target not halted");
\r
2224 return ERROR_TARGET_NOT_HALTED;
\r
2227 if (xscale->force_hw_bkpts)
\r
2229 DEBUG("forcing use of hardware breakpoint at address 0x%8.8x", breakpoint->address);
\r
2230 breakpoint->type = BKPT_HARD;
\r
2233 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1))
\r
2235 INFO("no breakpoint unit available for hardware breakpoint");
\r
2236 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
\r
2240 xscale->ibcr_available--;
\r
2243 if ((breakpoint->length != 2) && (breakpoint->length != 4))
\r
2245 INFO("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
\r
2246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
\r
2252 int xscale_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
\r
2254 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2255 xscale_common_t *xscale = armv4_5->arch_info;
\r
2257 if (target->state != TARGET_HALTED)
\r
2259 WARNING("target not halted");
\r
2260 return ERROR_TARGET_NOT_HALTED;
\r
2263 if (!breakpoint->set)
\r
2265 WARNING("breakpoint not set");
\r
2269 if (breakpoint->type == BKPT_HARD)
\r
2271 if (breakpoint->set == 1)
\r
2273 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
\r
2274 xscale->ibcr0_used = 0;
\r
2276 else if (breakpoint->set == 2)
\r
2278 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
\r
2279 xscale->ibcr1_used = 0;
\r
2281 breakpoint->set = 0;
\r
2285 /* restore original instruction (kept in target endianness) */
\r
2286 if (breakpoint->length == 4)
\r
2288 target->type->write_memory(target, breakpoint->address, 4, 1, breakpoint->orig_instr);
\r
2292 target->type->write_memory(target, breakpoint->address, 2, 1, breakpoint->orig_instr);
\r
2294 breakpoint->set = 0;
\r
2300 int xscale_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
\r
2302 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2303 xscale_common_t *xscale = armv4_5->arch_info;
\r
2305 if (target->state != TARGET_HALTED)
\r
2307 WARNING("target not halted");
\r
2308 return ERROR_TARGET_NOT_HALTED;
\r
2311 if (breakpoint->set)
\r
2313 xscale_unset_breakpoint(target, breakpoint);
\r
2316 if (breakpoint->type == BKPT_HARD)
\r
2317 xscale->ibcr_available++;
\r
/* Program a watchpoint into one of the two XScale data-breakpoint registers
 * (DBR0/DBR1) and enable it via the matching bit-pair in DBCON.
 * watchpoint->set records which comparator was used (1 = DBR0, 2 = DBR1).
 * NOTE(review): the 'enable' value is computed from watchpoint->rw in switch
 * cases that were stripped from this extraction — its declaration/assignment
 * are not visible here. */
2322 int xscale_set_watchpoint(struct target_s *target, watchpoint_t *watchpoint)

2324 armv4_5_common_t *armv4_5 = target->arch_info;

2325 xscale_common_t *xscale = armv4_5->arch_info;

2327 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];

2328 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);

2330 if (target->state != TARGET_HALTED)

2332 WARNING("target not halted");

2333 return ERROR_TARGET_NOT_HALTED;

/* refresh the cached DBCON value from the target */
2336 xscale_get_reg(dbcon);

2338 switch (watchpoint->rw)

2350 ERROR("BUG: watchpoint->rw neither read, write nor access");

/* first free comparator wins: DBR0 uses DBCON bits [1:0] ... */
2353 if (!xscale->dbr0_used)

2355 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);

2356 dbcon_value |= enable;

2357 xscale_set_reg_u32(dbcon, dbcon_value);

2358 watchpoint->set = 1;

2359 xscale->dbr0_used = 1;

/* ... DBR1 uses DBCON bits [3:2] (enable shifted left by 2) */
2361 else if (!xscale->dbr1_used)

2363 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);

2364 dbcon_value |= enable << 2;

2365 xscale_set_reg_u32(dbcon, dbcon_value);

2366 watchpoint->set = 2;

2367 xscale->dbr1_used = 1;

/* both comparators in use although dbr_available said otherwise */
2371 ERROR("BUG: no hardware comparator available");
\r
/* Reserve a hardware data-breakpoint resource for a new watchpoint.
 * Only bookkeeping happens here; the DBR registers are programmed later by
 * xscale_set_watchpoint(). Lengths of 1, 2 or 4 bytes are supported. */
2378 int xscale_add_watchpoint(struct target_s *target, watchpoint_t *watchpoint)

2380 armv4_5_common_t *armv4_5 = target->arch_info;

2381 xscale_common_t *xscale = armv4_5->arch_info;

2383 if (target->state != TARGET_HALTED)

2385 WARNING("target not halted");

2386 return ERROR_TARGET_NOT_HALTED;

/* only two DBR comparators exist on XScale */
2389 if (xscale->dbr_available < 1)

2391 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

2394 if ((watchpoint->length != 1) && (watchpoint->length != 2) && (watchpoint->length != 4))

2396 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

2399 xscale->dbr_available--;
\r
2404 int xscale_unset_watchpoint(struct target_s *target, watchpoint_t *watchpoint)
\r
2406 armv4_5_common_t *armv4_5 = target->arch_info;
\r
2407 xscale_common_t *xscale = armv4_5->arch_info;
\r
2408 reg_t *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
\r
2409 u32 dbcon_value = buf_get_u32(dbcon->value, 0, 32);
\r
2411 if (target->state != TARGET_HALTED)
\r
2413 WARNING("target not halted");
\r
2414 return ERROR_TARGET_NOT_HALTED;
\r
2417 if (!watchpoint->set)
\r
2419 WARNING("breakpoint not set");
\r
2423 if (watchpoint->set == 1)
\r
2425 dbcon_value &= ~0x3;
\r
2426 xscale_set_reg_u32(dbcon, dbcon_value);
\r
2427 xscale->dbr0_used = 0;
\r
2429 else if (watchpoint->set == 2)
\r
2431 dbcon_value &= ~0xc;
\r
2432 xscale_set_reg_u32(dbcon, dbcon_value);
\r
2433 xscale->dbr1_used = 0;
\r
2435 watchpoint->set = 0;
\r
/* Remove a watchpoint from the target: unset it if currently active, then
 * return the DBR comparator to the free pool (reverses add_watchpoint). */
2440 int xscale_remove_watchpoint(struct target_s *target, watchpoint_t *watchpoint)

2442 armv4_5_common_t *armv4_5 = target->arch_info;

2443 xscale_common_t *xscale = armv4_5->arch_info;

2445 if (target->state != TARGET_HALTED)

2447 WARNING("target not halted");

2448 return ERROR_TARGET_NOT_HALTED;

2451 if (watchpoint->set)

2453 xscale_unset_watchpoint(target, watchpoint);

2456 xscale->dbr_available++;
\r
2461 void xscale_enable_watchpoints(struct target_s *target)
\r
2463 watchpoint_t *watchpoint = target->watchpoints;
\r
2465 while (watchpoint)
\r
2467 if (watchpoint->set == 0)
\r
2468 xscale_set_watchpoint(target, watchpoint);
\r
2469 watchpoint = watchpoint->next;
\r
2473 void xscale_enable_breakpoints(struct target_s *target)
\r
2475 breakpoint_t *breakpoint = target->breakpoints;
\r
2477 /* set any pending breakpoints */
\r
2478 while (breakpoint)
\r
2480 if (breakpoint->set == 0)
\r
2481 xscale_set_breakpoint(target, breakpoint);
\r
2482 breakpoint = breakpoint->next;
\r
/* Read one XScale debug register into the register cache.
 * DCSR, TX and RX have dedicated JTAG access paths; RX and TXRXCTRL cannot
 * be read at all; every other register is fetched by asking the on-target
 * debug handler (command 0x40) and collecting the reply via TX. */
2486 int xscale_get_reg(reg_t *reg)

2488 xscale_reg_t *arch_info = reg->arch_info;

2489 target_t *target = arch_info->target;

2490 armv4_5_common_t *armv4_5 = target->arch_info;

2491 xscale_common_t *xscale = armv4_5->arch_info;

2493 /* DCSR, TX and RX are accessible via JTAG */

/* dispatch on the register's name string */
2494 if (strcmp(reg->name, "XSCALE_DCSR") == 0)

2496 return xscale_read_dcsr(arch_info->target);

2498 else if (strcmp(reg->name, "XSCALE_TX") == 0)

2500 /* 1 = consume register content */

2501 return xscale_read_tx(arch_info->target, 1);

2503 else if (strcmp(reg->name, "XSCALE_RX") == 0)

2505 /* can't read from RX register (host -> debug handler) */

2508 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)

2510 /* can't (explicitly) read from TXRXCTRL register */

2513 else /* Other DBG registers have to be transfered by the debug handler */

2515 /* send CP read request (command 0x40) */

2516 xscale_send_u32(target, 0x40);

2518 /* send CP register number */

2519 xscale_send_u32(target, arch_info->dbg_handler_number);

2521 /* read register value */

2522 xscale_read_tx(target, 1);

/* the handler's reply lands in the TX cache entry; copy it into reg */
2523 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
\r
/* Write one XScale debug register from a caller-supplied buffer.
 * Mirrors xscale_get_reg(): DCSR and RX have JTAG paths, TX and TXRXCTRL
 * cannot be written, everything else goes through the debug handler's CP
 * write request (command 0x41) followed by register number and value. */
2532 int xscale_set_reg(reg_t *reg, u8* buf)

2534 xscale_reg_t *arch_info = reg->arch_info;

2535 target_t *target = arch_info->target;

2536 armv4_5_common_t *armv4_5 = target->arch_info;

2537 xscale_common_t *xscale = armv4_5->arch_info;

2538 u32 value = buf_get_u32(buf, 0, 32);

2540 /* DCSR, TX and RX are accessible via JTAG */

2541 if (strcmp(reg->name, "XSCALE_DCSR") == 0)

2543 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);

2544 return xscale_write_dcsr(arch_info->target, -1, -1);

2546 else if (strcmp(reg->name, "XSCALE_RX") == 0)

2548 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);

2549 return xscale_write_rx(arch_info->target);

2551 else if (strcmp(reg->name, "XSCALE_TX") == 0)

2553 /* can't write to TX register (debug-handler -> host) */

2556 else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0)

2558 /* can't (explicitly) write to TXRXCTRL register */

2561 else /* Other DBG registers have to be transfered by the debug handler */

2563 /* send CP write request (command 0x41) */

2564 xscale_send_u32(target, 0x41);

2566 /* send CP register number */

2567 xscale_send_u32(target, arch_info->dbg_handler_number);

2569 /* send CP register value */

2570 xscale_send_u32(target, value);

/* keep the cached copy in sync with what was just written */
2571 buf_set_u32(reg->value, 0, 32, value);
\r
2577 /* convenience wrapper to access XScale specific registers */

/* Packs a plain u32 into a little buffer and forwards to xscale_set_reg().
 * NOTE(review): the local buffer declaration (u8 buf[4] in the original)
 * was stripped by the extraction and is not visible here. */
2578 int xscale_set_reg_u32(reg_t *reg, u32 value)

2582 buf_set_u32(buf, 0, 32, value);

2584 return xscale_set_reg(reg, buf);
\r
/* Write DCSR through the running debug handler ("sw" path) rather than via
 * direct JTAG access: issues CP write request 0x41, the DCSR handler number,
 * then the value, and updates the cached DCSR copy. */
2587 int xscale_write_dcsr_sw(target_t *target, u32 value)

2589 /* get pointers to arch-specific information */

2590 armv4_5_common_t *armv4_5 = target->arch_info;

2591 xscale_common_t *xscale = armv4_5->arch_info;

2592 reg_t *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];

2593 xscale_reg_t *dcsr_arch_info = dcsr->arch_info;

2595 /* send CP write request (command 0x41) */

2596 xscale_send_u32(target, 0x41);

2598 /* send CP register number */

2599 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);

2601 /* send CP register value */

2602 xscale_send_u32(target, value);

/* keep cached DCSR in sync */
2603 buf_set_u32(dcsr->value, 0, 32, value);
\r
/* Drain the on-chip trace buffer into a new xscale_trace_data_t appended to
 * xscale->trace.data. The debug handler returns 258 words: 256 trace entries
 * plus the two checkpoint addresses. Entries are classified as address bytes
 * vs. trace messages by scanning backwards for indirect-branch messages. */
2608 int xscale_read_trace(target_t *target)

2610 /* get pointers to arch-specific information */

2611 armv4_5_common_t *armv4_5 = target->arch_info;

2612 xscale_common_t *xscale = armv4_5->arch_info;

2613 xscale_trace_data_t **trace_data_p;

2615 /* 258 words from debug handler

2616 * 256 trace buffer entries

2617 * 2 checkpoint addresses

2619 u32 trace_buffer[258];

2620 int is_address[256];

2623 if (target->state != TARGET_HALTED)

2625 WARNING("target must be stopped to read trace data");

2626 return ERROR_TARGET_NOT_HALTED;

2629 /* send read trace buffer command (command 0x61) */

2630 xscale_send_u32(target, 0x61);

2632 /* receive trace buffer content */

2633 xscale_receive(target, trace_buffer, 258);

2635 /* parse buffer backwards to identify address entries */

2636 for (i = 255; i >= 0; i--)

2638 is_address[i] = 0;

/* 0x9x / 0xdx = (checkpointed) indirect branch messages; the four
 * preceding entries are the branch target address bytes */
2639 if (((trace_buffer[i] & 0xf0) == 0x90) ||

2640 ((trace_buffer[i] & 0xf0) == 0xd0))

2643 is_address[--i] = 1;

2645 is_address[--i] = 1;

2647 is_address[--i] = 1;

2649 is_address[--i] = 1;

2654 /* search first non-zero entry */

2655 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)

2660 DEBUG("no trace data collected");

2661 return ERROR_XSCALE_NO_TRACE_DATA;

/* walk to the end of the existing trace-data list, then append */
2664 for (trace_data_p = &xscale->trace.data; *trace_data_p; trace_data_p = &(*trace_data_p)->next)

2667 *trace_data_p = malloc(sizeof(xscale_trace_data_t));

2668 (*trace_data_p)->next = NULL;

2669 (*trace_data_p)->chkpt0 = trace_buffer[256];

2670 (*trace_data_p)->chkpt1 = trace_buffer[257];

/* record the PC (r15) at the time of the read as the trace endpoint */
2671 (*trace_data_p)->last_instruction = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

2672 (*trace_data_p)->entries = malloc(sizeof(xscale_trace_entry_t) * (256 - j));

2673 (*trace_data_p)->depth = 256 - j;

2675 for (i = j; i < 256; i++)

2677 (*trace_data_p)->entries[i - j].data = trace_buffer[i];

2678 if (is_address[i])

2679 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;

2681 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
\r
/* Fetch and decode the instruction at xscale->trace.current_pc from the
 * loaded trace image (not from target memory). Locates the image section
 * containing the PC, reads 4 bytes (ARM) or 2 bytes (Thumb) and decodes via
 * the arm_disassembler evaluators.
 * NOTE(review): declarations of section/i/retval/buf/size_read/opcode were
 * stripped by the extraction. */
2687 int xscale_read_instruction(target_t *target, arm_instruction_t *instruction)

2689 /* get pointers to arch-specific information */

2690 armv4_5_common_t *armv4_5 = target->arch_info;

2691 xscale_common_t *xscale = armv4_5->arch_info;

/* tracing without an image means we cannot resolve instructions */
2698 if (!xscale->trace.image)

2699 return ERROR_TRACE_IMAGE_UNAVAILABLE;

2701 /* search for the section the current instruction belongs to */

2702 for (i = 0; i < xscale->trace.image->num_sections; i++)

2704 if ((xscale->trace.image->sections[i].base_address <= xscale->trace.current_pc) &&

2705 (xscale->trace.image->sections[i].base_address + xscale->trace.image->sections[i].size > xscale->trace.current_pc))

2712 if (section == -1)

2714 /* current instruction couldn't be found in the image */

2715 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;

/* ARM state: 32-bit opcode */
2718 if (xscale->trace.core_state == ARMV4_5_STATE_ARM)

2721 if ((retval = image_read_section(xscale->trace.image, section,

2722 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,

2723 4, buf, &size_read)) != ERROR_OK)

2725 ERROR("error while reading instruction: %i", retval);

2726 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;

2728 opcode = target_buffer_get_u32(target, buf);

2729 arm_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);

/* Thumb state: 16-bit opcode */
2731 else if (xscale->trace.core_state == ARMV4_5_STATE_THUMB)

2734 if ((retval = image_read_section(xscale->trace.image, section,

2735 xscale->trace.current_pc - xscale->trace.image->sections[section].base_address,

2736 2, buf, &size_read)) != ERROR_OK)

2738 ERROR("error while reading instruction: %i", retval);

2739 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;

2741 opcode = target_buffer_get_u16(target, buf);

2742 thumb_evaluate_opcode(opcode, xscale->trace.current_pc, instruction);

2746 ERROR("BUG: unknown core state encountered");
\r
/* Reconstruct an indirect-branch target address from the four address-byte
 * entries that precede trace entry i (big-endian byte order: the entry four
 * back holds the most significant byte).
 * NOTE(review): the guard that rejects i < 4 and the return statements were
 * stripped by the extraction. */
2753 int xscale_branch_address(xscale_trace_data_t *trace_data, int i, u32 *target)

2755 /* if there are less than four entries prior to the indirect branch message

2756 * we can't extract the address */

2762 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |

2763 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
\r
/* Replay the collected trace data and print a reconstructed instruction
 * stream to the command context. For each trace message the branch/exception
 * target is derived (directly, from address bytes, or from the checkpoint
 * registers), then the instructions between the last known PC and the branch
 * are read from the trace image and printed.
 * NOTE(review): many declarations (i, retval, chkpt, exception, rollover,
 * branch, ...) and several case bodies were stripped by the extraction. */
2768 int xscale_analyze_trace(target_t *target, command_context_t *cmd_ctx)

2770 /* get pointers to arch-specific information */

2771 armv4_5_common_t *armv4_5 = target->arch_info;

2772 xscale_common_t *xscale = armv4_5->arch_info;

2773 int next_pc_ok = 0;

2774 u32 next_pc = 0x0;

2775 xscale_trace_data_t *trace_data = xscale->trace.data;

/* iterate over every captured trace buffer */
2778 while (trace_data)

2784 xscale->trace.core_state = ARMV4_5_STATE_ARM;

2789 for (i = 0; i < trace_data->depth; i++)

/* address entries were consumed by the branch-message handling */
2795 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)

/* high nibble of the entry selects the message type */
2798 switch ((trace_data->entries[i].data & 0xf0) >> 4)

2800 case 0: /* Exceptions */

2808 exception = (trace_data->entries[i].data & 0x70) >> 4;

/* exception vector address = vector number * 4 */
2810 next_pc = (trace_data->entries[i].data & 0xf0) >> 2;

2811 command_print(cmd_ctx, "--- exception %i ---", (trace_data->entries[i].data & 0xf0) >> 4);

2813 case 8: /* Direct Branch */

2816 case 9: /* Indirect Branch */

2818 if (xscale_branch_address(trace_data, i, &next_pc) == 0)

2823 case 13: /* Checkpointed Indirect Branch */

2824 if (xscale_branch_address(trace_data, i, &next_pc) == 0)

2827 if (((chkpt == 0) && (next_pc != trace_data->chkpt0))

2828 || ((chkpt == 1) && (next_pc != trace_data->chkpt1)))

2829 WARNING("checkpointed indirect branch target address doesn't match checkpoint");

2831 /* explicit fall-through */

2832 case 12: /* Checkpointed Direct Branch */

2837 next_pc = trace_data->chkpt0;

2840 else if (chkpt == 1)

/* NOTE(review): suspected copy-paste bug — the chkpt == 1 branch
 * assigns chkpt0; it presumably should use trace_data->chkpt1.
 * Verify against the surrounding (stripped) context before fixing. */
2843 next_pc = trace_data->chkpt0;

2848 WARNING("more than two checkpointed branches encountered");

2851 case 15: /* Roll-over */

2854 default: /* Reserved */

2855 command_print(cmd_ctx, "--- reserved trace message ---");

2856 ERROR("BUG: trace message %i is reserved", (trace_data->entries[i].data & 0xf0) >> 4);

/* once a known-good PC exists, replay the executed instructions */
2860 if (xscale->trace.pc_ok)

2862 int executed = (trace_data->entries[i].data & 0xf) + rollover * 16;

2863 arm_instruction_t instruction;

2865 if ((exception == 6) || (exception == 7))

2867 /* IRQ or FIQ exception, no instruction executed */

2871 while (executed-- >= 0)

2873 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)

2875 /* can't continue tracing with no image available */

2876 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)

2880 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)

2882 /* TODO: handle incomplete images */

2886 /* a precise abort on a load to the PC is included in the incremental

2887 * word count, other instructions causing data aborts are not included

2889 if ((executed == 0) && (exception == 4)

2890 && ((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDM)))

2892 if ((instruction.type == ARM_LDM)

2893 && ((instruction.info.load_store_multiple.register_list & 0x8000) == 0))

2897 else if (((instruction.type >= ARM_LDR) && (instruction.type <= ARM_LDRSH))

2898 && (instruction.info.load_store.Rd != 15))

2904 /* only the last instruction executed

2905 * (the one that caused the control flow change)

2906 * could be a taken branch

2908 if (((executed == -1) && (branch == 1)) &&

2909 (((instruction.type == ARM_B) ||

2910 (instruction.type == ARM_BL) ||

2911 (instruction.type == ARM_BLX)) &&

2912 (instruction.info.b_bl_bx_blx.target_address != -1)))

2914 xscale->trace.current_pc = instruction.info.b_bl_bx_blx.target_address;

/* fall through a non-branch: advance by instruction width */
2918 xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2;

2920 command_print(cmd_ctx, "%s", instruction.text);

2928 xscale->trace.current_pc = next_pc;

2929 xscale->trace.pc_ok = 1;

/* print the tail: instructions between the last branch and the final PC */
2933 for (; xscale->trace.current_pc < trace_data->last_instruction; xscale->trace.current_pc += (xscale->trace.core_state == ARMV4_5_STATE_ARM) ? 4 : 2)

2935 arm_instruction_t instruction;

2936 if ((retval = xscale_read_instruction(target, &instruction)) != ERROR_OK)

2938 /* can't continue tracing with no image available */

2939 if (retval == ERROR_TRACE_IMAGE_UNAVAILABLE)

2943 else if (retval == ERROR_TRACE_INSTRUCTION_UNAVAILABLE)

2945 /* TODO: handle incomplete images */

2948 command_print(cmd_ctx, "%s", instruction.text);

2951 trace_data = trace_data->next;
\r
/* Build the target's register caches: first the generic ARMv4/5 core cache,
 * then a second cache holding the XScale debug registers, each entry wired
 * to the xscale_get_reg/xscale_set_reg accessors via a shared arch type. */
2957 void xscale_build_reg_cache(target_t *target)

2959 /* get pointers to arch-specific information */

2960 armv4_5_common_t *armv4_5 = target->arch_info;

2961 xscale_common_t *xscale = armv4_5->arch_info;

2963 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);

/* one private copy of the static arch-info template per target */
2964 xscale_reg_t *arch_info = malloc(sizeof(xscale_reg_arch_info));

2966 int num_regs = sizeof(xscale_reg_arch_info) / sizeof(xscale_reg_t);

2968 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);

2969 armv4_5->core_cache = (*cache_p);

2971 /* register a register arch-type for XScale dbg registers only once */

2972 if (xscale_reg_arch_type == -1)

2973 xscale_reg_arch_type = register_reg_arch_type(xscale_get_reg, xscale_set_reg);

/* chain the XScale cache behind the core cache */
2975 (*cache_p)->next = malloc(sizeof(reg_cache_t));

2976 cache_p = &(*cache_p)->next;

2978 /* fill in values for the xscale reg cache */

2979 (*cache_p)->name = "XScale registers";

2980 (*cache_p)->next = NULL;

2981 (*cache_p)->reg_list = malloc(num_regs * sizeof(reg_t));

2982 (*cache_p)->num_regs = num_regs;

2984 for (i = 0; i < num_regs; i++)

2986 (*cache_p)->reg_list[i].name = xscale_reg_list[i];

2987 (*cache_p)->reg_list[i].value = calloc(4, 1);

2988 (*cache_p)->reg_list[i].dirty = 0;

2989 (*cache_p)->reg_list[i].valid = 0;

2990 (*cache_p)->reg_list[i].size = 32;

2991 (*cache_p)->reg_list[i].bitfield_desc = NULL;

2992 (*cache_p)->reg_list[i].num_bitfields = 0;

2993 (*cache_p)->reg_list[i].arch_info = &arch_info[i];

2994 (*cache_p)->reg_list[i].arch_type = xscale_reg_arch_type;

/* copy the template entry, then bind it to this target */
2995 arch_info[i] = xscale_reg_arch_info[i];

2996 arch_info[i].target = target;

2999 xscale->reg_cache = (*cache_p);
\r
/* Per-target init hook: XScale debug only works when the session starts
 * with a reset, so warn otherwise, then pulse TRST once to put the TAP into
 * a known state before any other JTAG traffic. */
3002 int xscale_init_target(struct command_context_s *cmd_ctx, struct target_s *target)

3004 if (startup_mode != DAEMON_RESET)

3006 ERROR("XScale target requires a reset");

3007 ERROR("Reset target to enable debug");

3010 /* assert TRST once during startup */

3011 jtag_add_reset(1, 0);

/* hold TRST ~5us before releasing */
3012 jtag_add_sleep(5000);

3013 jtag_add_reset(0, 0);

3014 jtag_execute_queue();
\r
/* One-time initialization of the xscale_common_t structure for a new target:
 * JTAG instruction codes and IR length per variant, debug handler placement,
 * reset vectors branching into the handler, breakpoint/watchpoint resource
 * accounting, trace defaults, and the ARMv4/5 + MMU glue functions. */
3025 int xscale_init_arch_info(target_t *target, xscale_common_t *xscale, int chain_pos, char *variant)

3027 armv4_5_common_t *armv4_5;

3028 u32 high_reset_branch, low_reset_branch;

3031 armv4_5 = &xscale->armv4_5_common;

3033 /* store architecture specfic data (none so far) */

3034 xscale->arch_info = NULL;

3035 xscale->common_magic = XSCALE_COMMON_MAGIC;

3037 /* remember the variant (PXA25x, PXA27x, IXP42x, ...) */

3038 xscale->variant = strdup(variant);

3040 /* prepare JTAG information for the new target */

3041 xscale->jtag_info.chain_pos = chain_pos;

3042 jtag_register_event_callback(xscale_jtag_callback, target);

/* JTAG instruction codes for the XScale debug TAP */
3044 xscale->jtag_info.dbgrx = 0x02;

3045 xscale->jtag_info.dbgtx = 0x10;

3046 xscale->jtag_info.dcsr = 0x09;

3047 xscale->jtag_info.ldic = 0x07;

/* IR length differs between XScale families */
3049 if ((strcmp(xscale->variant, "pxa250") == 0) ||

3050 (strcmp(xscale->variant, "pxa255") == 0) ||

3051 (strcmp(xscale->variant, "pxa26x") == 0))

3053 xscale->jtag_info.ir_length = 5;

3055 else if ((strcmp(xscale->variant, "pxa27x") == 0) ||

3056 (strcmp(xscale->variant, "ixp42x") == 0) ||

3057 (strcmp(xscale->variant, "ixp45x") == 0) ||

3058 (strcmp(xscale->variant, "ixp46x") == 0))

3060 xscale->jtag_info.ir_length = 7;

3063 /* the debug handler isn't installed (and thus not running) at this time */

3064 xscale->handler_installed = 0;

3065 xscale->handler_running = 0;

3066 xscale->handler_address = 0xfe000800;

3068 /* clear the vectors we keep locally for reference */

3069 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));

3070 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));

3072 /* no user-specified vectors have been configured yet */

3073 xscale->static_low_vectors_set = 0x0;

3074 xscale->static_high_vectors_set = 0x0;

3076 /* calculate branches to debug handler */

/* ARM branch offset = (target - vector_addr - 8) >> 2; reset handler
 * entry sits 0x20 past the handler base */
3077 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;

3078 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;

3080 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);

3081 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);

/* all other vectors default to a branch-to-self (0xfffffe offset) */
3083 for (i = 1; i <= 7; i++)

3085 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);

3086 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);

3089 /* 64kB aligned region used for DCache cleaning */

3090 xscale->cache_clean_address = 0xfffe0000;

3092 xscale->hold_rst = 0;

3093 xscale->external_debug_break = 0;

3095 xscale->force_hw_bkpts = 1;

/* two instruction-breakpoint and two data-breakpoint comparators */
3097 xscale->ibcr_available = 2;

3098 xscale->ibcr0_used = 0;

3099 xscale->ibcr1_used = 0;

3101 xscale->dbr_available = 2;

3102 xscale->dbr0_used = 0;

3103 xscale->dbr1_used = 0;

/* opcodes used for software breakpoints */
3105 xscale->arm_bkpt = ARMV5_BKPT(0x0);

3106 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;

3108 xscale->vector_catch = 0x1;

3110 xscale->trace.capture_status = TRACE_IDLE;

3111 xscale->trace.data = NULL;

3112 xscale->trace.image = NULL;

3113 xscale->trace.buffer_enabled = 0;

3114 xscale->trace.buffer_fill = 0;

3116 /* prepare ARMv4/5 specific information */

3117 armv4_5->arch_info = xscale;

3118 armv4_5->read_core_reg = xscale_read_core_reg;

3119 armv4_5->write_core_reg = xscale_write_core_reg;

3120 armv4_5->full_context = xscale_full_context;

3122 armv4_5_init_arch_info(target, armv4_5);

/* MMU/cache glue: cache type is probed later (ctype == -1) */
3124 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;

3125 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;

3126 xscale->armv4_5_mmu.read_memory = xscale_read_memory;

3127 xscale->armv4_5_mmu.write_memory = xscale_write_memory;

3128 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;

3129 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;

3130 xscale->armv4_5_mmu.has_tiny_pages = 1;

3131 xscale->armv4_5_mmu.mmu_enabled = 0;

3133 xscale->fast_memory_access = 0;
\r
3138 /* target xscale <endianess> <startup_mode> <chain_pos> <variant> */

/* Config-file handler for "target xscale ...": allocates the per-target
 * xscale_common_t, initializes arch info and builds the register caches.
 * NOTE(review): the argc guard line was stripped; args[3]/args[4] indexing
 * suggests the arguments shown in the usage string start at args[1]. */
3139 int xscale_target_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc, struct target_s *target)

3142 char *variant = NULL;

3143 xscale_common_t *xscale = malloc(sizeof(xscale_common_t));

3147 ERROR("'target xscale' requires four arguments: <endianess> <startup_mode> <chain_pos> <variant>");

3151 chain_pos = strtoul(args[3], NULL, 0);

3153 variant = args[4];

3155 xscale_init_arch_info(target, xscale, chain_pos, variant);

3156 xscale_build_reg_cache(target);
\r
/* "xscale debug_handler <target#> <address>": set the address at which the
 * debug handler will be installed. Only the two mini-instruction-cache
 * backed ranges are accepted. */
3161 int xscale_handle_debug_handler_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3163 target_t *target = NULL;

3164 armv4_5_common_t *armv4_5;

3165 xscale_common_t *xscale;

3167 u32 handler_address;

3171 ERROR("'xscale debug_handler <target#> <address>' command takes two required operands");

3175 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)

3177 ERROR("no target '%s' configured", args[0]);

/* also validates that the target really is an XScale */
3181 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3186 handler_address = strtoul(args[1], NULL, 0);

3188 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||

3189 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))

3191 xscale->handler_address = handler_address;

3195 ERROR("xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
\r
/* "xscale cache_clean_address <target#> <address>": set the 64kB-aligned
 * memory region used for forcing DCache line evictions. */
3201 int xscale_handle_cache_clean_address_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3203 target_t *target = NULL;

3204 armv4_5_common_t *armv4_5;

3205 xscale_common_t *xscale;

3207 u32 cache_clean_address;

3211 ERROR("'xscale cache_clean_address <target#> <address>' command takes two required operands");

3215 if ((target = get_target_by_num(strtoul(args[0], NULL, 0))) == NULL)

3217 ERROR("no target '%s' configured", args[0]);

3221 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3226 cache_clean_address = strtoul(args[1], NULL, 0);

/* low 16 bits must be zero: region must be 64kB aligned */
3228 if (cache_clean_address & 0xffff)

3230 ERROR("xscale cache_clean_address <address> must be 64kb aligned");

3234 xscale->cache_clean_address = cache_clean_address;
\r
/* "xscale cache_info": print cache geometry via the shared ARMv4/5 helper. */
3240 int xscale_handle_cache_info_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3242 target_t *target = get_current_target(cmd_ctx);

3243 armv4_5_common_t *armv4_5;

3244 xscale_common_t *xscale;

3246 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3251 return armv4_5_handle_cache_info_command(cmd_ctx, &xscale->armv4_5_mmu.armv4_5_cache);
\r
/* Translate a virtual address to physical using the generic ARMv4/5 MMU
 * table walker.
 * NOTE(review): declarations of type/cb/domain/ap/retval and the code that
 * stores the result into *physical were stripped by the extraction. */
3254 static int xscale_virt2phys(struct target_s *target, u32 virtual, u32 *physical)

3256 armv4_5_common_t *armv4_5;

3257 xscale_common_t *xscale;

3264 if ((retval = xscale_get_arch_pointers(target, &armv4_5, &xscale)) != ERROR_OK)

3268 u32 ret = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu, virtual, &type, &cb, &domain, &ap);
\r
/* Report whether the MMU is currently enabled; requires a halted target
 * because the cached state is only valid then. */
3278 static int xscale_mmu(struct target_s *target, int *enabled)

3280 armv4_5_common_t *armv4_5 = target->arch_info;

3281 xscale_common_t *xscale = armv4_5->arch_info;

3283 if (target->state != TARGET_HALTED)

3285 ERROR("Target not halted");

3286 return ERROR_TARGET_INVALID;

3289 *enabled = xscale->armv4_5_mmu.mmu_enabled;
\r
/* "xscale mmu [enable|disable]": toggle the MMU via CP15 and mirror the new
 * state in the cached flag; with no argument just print the current state. */
3293 int xscale_handle_mmu_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)

3295 target_t *target = get_current_target(cmd_ctx);

3296 armv4_5_common_t *armv4_5;

3297 xscale_common_t *xscale;

3299 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3304 if (target->state != TARGET_HALTED)

3306 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);

3312 if (strcmp("enable", args[0]) == 0)

3314 xscale_enable_mmu_caches(target, 1, 0, 0);

3315 xscale->armv4_5_mmu.mmu_enabled = 1;

3317 else if (strcmp("disable", args[0]) == 0)

3319 xscale_disable_mmu_caches(target, 1, 0, 0);

3320 xscale->armv4_5_mmu.mmu_enabled = 0;

3324 command_print(cmd_ctx, "mmu %s", (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
\r
/* Shared handler for "xscale icache ..." and "xscale dcache ...": the
 * command name selects which cache to toggle, then enable/disable is applied
 * and the cached enable flags are updated and printed.
 * NOTE(review): the assignments icache = 1 / dcache = 1 after the strcmp
 * dispatch were stripped by the extraction. */
3329 int xscale_handle_idcache_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)

3331 target_t *target = get_current_target(cmd_ctx);

3332 armv4_5_common_t *armv4_5;

3333 xscale_common_t *xscale;

3334 int icache = 0, dcache = 0;

3336 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3341 if (target->state != TARGET_HALTED)

3343 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);

/* cmd tells us whether we were invoked as "icache" or "dcache" */
3347 if (strcmp(cmd, "icache") == 0)

3349 else if (strcmp(cmd, "dcache") == 0)

3354 if (strcmp("enable", args[0]) == 0)

3356 xscale_enable_mmu_caches(target, 0, dcache, icache);

3359 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 1;

3361 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 1;

3363 else if (strcmp("disable", args[0]) == 0)

3365 xscale_disable_mmu_caches(target, 0, dcache, icache);

3368 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = 0;

3370 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = 0;

3375 command_print(cmd_ctx, "icache %s", (xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled) ? "enabled" : "disabled");

3378 command_print(cmd_ctx, "dcache %s", (xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled) ? "enabled" : "disabled");
\r
/* "xscale vector_catch [mask]": optionally set, then print, the 8-bit
 * exception vector catch mask (written into DCSR bits [23:16]). */
3383 int xscale_handle_vector_catch_command(command_context_t *cmd_ctx, char *cmd, char **args, int argc)

3385 target_t *target = get_current_target(cmd_ctx);

3386 armv4_5_common_t *armv4_5;

3387 xscale_common_t *xscale;

3389 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3396 command_print(cmd_ctx, "usage: xscale vector_catch [mask]");

3400 xscale->vector_catch = strtoul(args[0], NULL, 0);

/* mask lives in DCSR bits 16..23 */
3401 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 8, xscale->vector_catch);

3402 xscale_write_dcsr(target, -1, -1);

3405 command_print(cmd_ctx, "vector catch mask: 0x%2.2x", xscale->vector_catch);
\r
/* "xscale force_hw_bkpts <enable|disable>": toggle forcing all breakpoints
 * to hardware breakpoints, then report the current setting. */
3410 int xscale_handle_force_hw_bkpts_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3412 target_t *target = get_current_target(cmd_ctx);

3413 armv4_5_common_t *armv4_5;

3414 xscale_common_t *xscale;

3416 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3421 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))

3423 xscale->force_hw_bkpts = 1;

3425 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))

3427 xscale->force_hw_bkpts = 0;

3431 command_print(cmd_ctx, "usage: xscale force_hw_bkpts <enable|disable>");

3434 command_print(cmd_ctx, "force hardware breakpoints %s", (xscale->force_hw_bkpts) ? "enabled" : "disabled");
\r
/* "xscale trace_buffer <enable|disable> [fill [n] | wrap]": configure the
 * on-chip trace buffer. Enabling frees any previously collected trace data;
 * fill/wrap selects one-shot vs. circular capture (buffer_fill > 0 = fill,
 * -1 = wrap), and DCSR bits [1:0] are updated accordingly.
 * NOTE(review): the dcsr_value declaration and some free() calls in the
 * trace-data cleanup loop were stripped by the extraction. */
3439 int xscale_handle_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3441 target_t *target = get_current_target(cmd_ctx);

3442 armv4_5_common_t *armv4_5;

3443 xscale_common_t *xscale;

3446 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3451 if (target->state != TARGET_HALTED)

3453 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);

3457 if ((argc >= 1) && (strcmp("enable", args[0]) == 0))

3459 xscale_trace_data_t *td, *next_td;

3460 xscale->trace.buffer_enabled = 1;

3462 /* free old trace data */

3463 td = xscale->trace.data;

3466 next_td = td->next;

3469 free(td->entries);

3473 xscale->trace.data = NULL;

3475 else if ((argc >= 1) && (strcmp("disable", args[0]) == 0))

3477 xscale->trace.buffer_enabled = 0;

/* optional third argument gives the number of fill iterations */
3480 if ((argc >= 2) && (strcmp("fill", args[1]) == 0))

3483 xscale->trace.buffer_fill = strtoul(args[2], NULL, 0);

3485 xscale->trace.buffer_fill = 1;

3487 else if ((argc >= 2) && (strcmp("wrap", args[1]) == 0))

3489 xscale->trace.buffer_fill = -1;

3492 if (xscale->trace.buffer_enabled)

3494 /* if we enable the trace buffer in fill-once

3495 * mode we know the address of the first instruction */

3496 xscale->trace.pc_ok = 1;

3497 xscale->trace.current_pc = buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32);

3501 /* otherwise the address is unknown, and we have no known good PC */

3502 xscale->trace.pc_ok = 0;

3505 command_print(cmd_ctx, "trace buffer %s (%s)",

3506 (xscale->trace.buffer_enabled) ? "enabled" : "disabled",

3507 (xscale->trace.buffer_fill > 0) ? "fill" : "wrap",

/* program DCSR[1:0]: 2 = fill mode, 0 = wrap mode */
3509 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);

3510 if (xscale->trace.buffer_fill >= 0)

3511 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);

3513 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
\r
/* "xscale trace_image <file> [base address] [type]": load the image used by
 * the trace analyzer to resolve instructions; replaces any image loaded
 * previously. */
3518 int xscale_handle_trace_image_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3521 armv4_5_common_t *armv4_5;

3522 xscale_common_t *xscale;

3526 command_print(cmd_ctx, "usage: xscale trace_image <file> [base address] [type]");

3530 target = get_current_target(cmd_ctx);

3532 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3537 if (xscale->trace.image)

3539 image_close(xscale->trace.image);

3540 free(xscale->trace.image);

3541 command_print(cmd_ctx, "previously loaded image found and closed");

3544 xscale->trace.image = malloc(sizeof(image_t));

3545 xscale->trace.image->base_address_set = 0;

3546 xscale->trace.image->start_address_set = 0;

3548 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */

3551 xscale->trace.image->base_address_set = 1;

3552 xscale->trace.image->base_address = strtoul(args[1], NULL, 0);

3556 xscale->trace.image->base_address_set = 0;

/* args[2], when present, names the image format */
3559 if (image_open(xscale->trace.image, args[0], (argc >= 3) ? args[2] : NULL) != ERROR_OK)

3561 command_print(cmd_ctx, "image opening error: %s", xscale->trace.image->error_str);

3562 free(xscale->trace.image);

3563 xscale->trace.image = NULL;
\r
/* "xscale dump_trace <file>": serialize every collected trace-data record
 * (checkpoints, last instruction, depth, then entries packed as
 * data | type << 16) into a binary file.
 * NOTE(review): the fileio_t 'file' declaration was stripped by the
 * extraction. */
3570 int xscale_handle_dump_trace_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)

3572 target_t *target = get_current_target(cmd_ctx);

3573 armv4_5_common_t *armv4_5;

3574 xscale_common_t *xscale;

3575 xscale_trace_data_t *trace_data;

3578 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)

3583 if (target->state != TARGET_HALTED)

3585 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);

3591 command_print(cmd_ctx, "usage: xscale dump_trace <file>");

3595 trace_data = xscale->trace.data;

3599 command_print(cmd_ctx, "no trace data collected");

3603 if (fileio_open(&file, args[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)

3605 command_print(cmd_ctx, "file open error: %s", file.error_str);

3609 while (trace_data)

/* per-record header: checkpoints, end PC, entry count */
3613 fileio_write_u32(&file, trace_data->chkpt0);

3614 fileio_write_u32(&file, trace_data->chkpt1);

3615 fileio_write_u32(&file, trace_data->last_instruction);

3616 fileio_write_u32(&file, trace_data->depth);

3618 for (i = 0; i < trace_data->depth; i++)

3619 fileio_write_u32(&file, trace_data->entries[i].data | ((trace_data->entries[i].type & 0xffff) << 16));

3621 trace_data = trace_data->next;

3624 fileio_close(&file);
\r
3629 int xscale_handle_analyze_trace_buffer_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
\r
3631 target_t *target = get_current_target(cmd_ctx);
\r
3632 armv4_5_common_t *armv4_5;
\r
3633 xscale_common_t *xscale;
\r
3635 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
\r
3640 xscale_analyze_trace(target, cmd_ctx);
\r
3645 int xscale_handle_cp15(command_context_t *cmd_ctx, char *cmd, char **args, int argc)
\r
3647 target_t *target = get_current_target(cmd_ctx);
\r
3648 armv4_5_common_t *armv4_5;
\r
3649 xscale_common_t *xscale;
\r
3651 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
\r
3656 if (target->state != TARGET_HALTED)
\r
3658 command_print(cmd_ctx, "target must be stopped for \"%s\" command", cmd);
\r
3662 reg_t *reg = NULL;
\r
3665 reg_no = strtoul(args[0], NULL, 0);
\r
3666 /*translate from xscale cp15 register no to openocd register*/
\r
3670 reg_no = XSCALE_MAINID;
\r
3673 reg_no = XSCALE_CTRL;
\r
3676 reg_no = XSCALE_TTB;
\r
3679 reg_no = XSCALE_DAC;
\r
3682 reg_no = XSCALE_FSR;
\r
3685 reg_no = XSCALE_FAR;
\r
3688 reg_no = XSCALE_PID;
\r
3691 reg_no = XSCALE_CPACCESS;
\r
3694 command_print(cmd_ctx, "invalid register number");
\r
3695 return ERROR_INVALID_ARGUMENTS;
\r
3697 reg = &xscale->reg_cache->reg_list[reg_no];
\r
3704 /* read cp15 control register */
\r
3705 xscale_get_reg(reg);
\r
3706 value = buf_get_u32(reg->value, 0, 32);
\r
3707 command_print(cmd_ctx, "%s (/%i): 0x%x", reg->name, reg->size, value);
\r
3709 else if(argc == 2)
\r
3712 u32 value = strtoul(args[1], NULL, 0);
\r
3714 /* send CP write request (command 0x41) */
\r
3715 xscale_send_u32(target, 0x41);
\r
3717 /* send CP register number */
\r
3718 xscale_send_u32(target, reg_no);
\r
3720 /* send CP register value */
\r
3721 xscale_send_u32(target, value);
\r
3723 /* execute cpwait to ensure outstanding operations complete */
\r
3724 xscale_send_u32(target, 0x53);
\r
3728 command_print(cmd_ctx, "usage: cp15 [register]<, [value]>");
\r
3734 int handle_xscale_fast_memory_access_command(struct command_context_s *cmd_ctx, char *cmd, char **args, int argc)
\r
3736 target_t *target = get_current_target(cmd_ctx);
\r
3737 armv4_5_common_t *armv4_5;
\r
3738 xscale_common_t *xscale;
\r
3740 if (xscale_get_arch_pointers(target, &armv4_5, &xscale) != ERROR_OK)
\r
3747 if (strcmp("enable", args[0]) == 0)
\r
3749 xscale->fast_memory_access = 1;
\r
3751 else if (strcmp("disable", args[0]) == 0)
\r
3753 xscale->fast_memory_access = 0;
\r
3757 return ERROR_COMMAND_SYNTAX_ERROR;
\r
3759 } else if (argc!=0)
\r
3761 return ERROR_COMMAND_SYNTAX_ERROR;
\r
3764 command_print(cmd_ctx, "fast memory access is %s", (xscale->fast_memory_access) ? "enabled" : "disabled");
\r
3769 int xscale_register_commands(struct command_context_s *cmd_ctx)
\r
3771 command_t *xscale_cmd;
\r
3773 xscale_cmd = register_command(cmd_ctx, NULL, "xscale", NULL, COMMAND_ANY, "xscale specific commands");
\r
3775 register_command(cmd_ctx, xscale_cmd, "debug_handler", xscale_handle_debug_handler_command, COMMAND_ANY, "'xscale debug_handler <target#> <address>' command takes two required operands");
\r
3776 register_command(cmd_ctx, xscale_cmd, "cache_clean_address", xscale_handle_cache_clean_address_command, COMMAND_ANY, NULL);
\r
3778 register_command(cmd_ctx, xscale_cmd, "cache_info", xscale_handle_cache_info_command, COMMAND_EXEC, NULL);
\r
3779 register_command(cmd_ctx, xscale_cmd, "mmu", xscale_handle_mmu_command, COMMAND_EXEC, "['enable'|'disable'] the MMU");
\r
3780 register_command(cmd_ctx, xscale_cmd, "icache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the ICache");
\r
3781 register_command(cmd_ctx, xscale_cmd, "dcache", xscale_handle_idcache_command, COMMAND_EXEC, "['enable'|'disable'] the DCache");
\r
3783 register_command(cmd_ctx, xscale_cmd, "vector_catch", xscale_handle_idcache_command, COMMAND_EXEC, "<mask> of vectors that should be catched");
\r
3785 register_command(cmd_ctx, xscale_cmd, "trace_buffer", xscale_handle_trace_buffer_command, COMMAND_EXEC, "<enable|disable> ['fill' [n]|'wrap']");
\r
3787 register_command(cmd_ctx, xscale_cmd, "dump_trace", xscale_handle_dump_trace_command, COMMAND_EXEC, "dump content of trace buffer to <file>");
\r
3788 register_command(cmd_ctx, xscale_cmd, "analyze_trace", xscale_handle_analyze_trace_buffer_command, COMMAND_EXEC, "analyze content of trace buffer");
\r
3789 register_command(cmd_ctx, xscale_cmd, "trace_image", xscale_handle_trace_image_command,
\r
3790 COMMAND_EXEC, "load image from <file> [base address]");
\r
3792 register_command(cmd_ctx, xscale_cmd, "cp15", xscale_handle_cp15, COMMAND_EXEC, "access coproc 15 <register> [value]");
\r
3793 register_command(cmd_ctx, xscale_cmd, "fast_memory_access", handle_xscale_fast_memory_access_command,
\r
3794 COMMAND_ANY, "use fast memory accesses instead of slower but potentially unsafe slow accesses <enable|disable>");
\r
3796 armv4_5_register_commands(cmd_ctx);
\r