1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 /***************************************************************************
4 * ESP Xtensa SMP target API for OpenOCD *
5 * Copyright (C) 2020 Espressif Systems Ltd. Co *
6 ***************************************************************************/
13 #include <target/target.h>
14 #include <target/target_type.h>
15 #include <target/smp.h>
16 #include "esp_xtensa_smp.h"
19 Multiprocessor stuff common:
21 The ESP Xtensa chip can have several cores in it, which can run in SMP-mode if an
22 SMP-capable OS is running. The hardware has a few features which makes
23 SMP debugging much easier.
25 First of all, there's something called a 'break network', consisting of a
26 BreakIn input and a BreakOut output on each CPU. The idea is that as soon
27 as a CPU goes into debug mode for whatever reason, it'll signal that using
28 its BreakOut pin. This signal is connected to the other CPU's BreakIn
29 input, causing this CPU also to go into debugging mode. To resume execution
30 when using only this break network, we will need to manually resume both
33 An alternative to this is the XOCDMode output and the RunStall (or DebugStall)
34 input. When these are cross-connected, a CPU that goes into debug mode will
35 halt execution entirely on the other CPU. Execution on the other CPU can be
36 resumed by either the first CPU going out of debug mode, or the second CPU
37 going into debug mode: the stall is temporarily lifted as long as the stalled
40 A third, separate, signal is CrossTrigger. This is connected in the same way
41 as the breakIn/breakOut network, but is for the TRAX (trace memory) feature;
42 it does not affect OCD in any way.
48 The ESP Xtensa chip has several Xtensa cores inside, but represents itself to the OCD
49 as one chip that works in multithreading mode under a FreeRTOS OS.
50 The core that initiates the stop condition will be defined as the active CPU.
51 When one core stops, the other cores will be stopped automatically by smpbreak.
52 The core that initiates the stop condition will be defined as the active core, and
53 registers of this core will be transferred.
56 #define ESP_XTENSA_SMP_EXAMINE_OTHER_CORES 5
58 static int esp_xtensa_smp_update_halt_gdb(struct target *target, bool *need_resume);
/* Recover the esp_xtensa_smp_common wrapper from a generic OpenOCD target.
 * target->arch_info points at the embedded esp_xtensa member, so
 * container_of() walks back to the enclosing esp_xtensa_smp_common. */
60 static inline struct esp_xtensa_smp_common *target_to_esp_xtensa_smp(struct target *target)
62 return container_of(target->arch_info, struct esp_xtensa_smp_common, esp_xtensa);
/* Assert reset on this core.
 * NOTE(review): function body is not visible in this excerpt; presumably it
 * defers to xtensa_assert_reset() like the soft-reset-halt path — confirm. */
65 int esp_xtensa_smp_assert_reset(struct target *target)
/* De-assert reset on one core.
 * Besides the generic xtensa_deassert_reset(), this (re-)examines a core that
 * was left un-examined before the SoC reset (see inline comment below). */
70 int esp_xtensa_smp_deassert_reset(struct target *target)
72 LOG_TARGET_DEBUG(target, "begin");
74 int ret = xtensa_deassert_reset(target);
77 /* in SMP mode when chip was running single-core app the other core can be left un-examined,
78 because examination is done before SOC reset. But after SOC reset it is functional and should be handled.
79 So try to examine un-examined core just after SOC reset */
80 if (target->smp && !target_was_examined(target))
81 ret = xtensa_examine(target);
/* Soft-reset-halt the whole cluster: reset the SoC via chip_ops->reset()
 * (only from the core with coreid 0, i.e. the PRO-CPU), then assert reset on
 * every SMP core so they all stop at the reset vector.
 * NOTE(review): the early-return path for coreid != 0 is elided here. */
85 int esp_xtensa_smp_soft_reset_halt(struct target *target)
88 struct target_list *head;
89 struct esp_xtensa_smp_common *esp_xtensa_smp = target_to_esp_xtensa_smp(target);
91 LOG_TARGET_DEBUG(target, "begin");
92 /* in SMP mode we need to ensure that at first we reset SOC on PRO-CPU
93 and then call xtensa_assert_reset() for all cores */
94 if (target->smp && target->coreid != 0)
96 /* Reset the SoC first */
97 if (esp_xtensa_smp->chip_ops->reset) {
98 res = esp_xtensa_smp->chip_ops->reset(target);
103 return xtensa_assert_reset(target);
105 foreach_smp_target(head, target->smp_targets) {
106 res = xtensa_assert_reset(head->target);
/* Find the SMP-list member with the given coreid that is currently in
 * TARGET_HALTED state. NOTE(review): the not-found return value is elided in
 * this excerpt — presumably NULL; confirm before relying on it. */
113 static struct target *get_halted_esp_xtensa_smp(struct target *target, int32_t coreid)
115 struct target_list *head;
118 foreach_smp_target(head, target->smp_targets) {
120 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
/* Poll one core and do the SMP bookkeeping around it:
 *  - if GDB is waiting for a specific core, re-point the gdb_service target
 *    at the halted core matching gdb_service->core[1];
 *  - for a few polls after TARGET_RESET (ESP_XTENSA_SMP_EXAMINE_OTHER_CORES),
 *    try to examine any cores that were not examined before the reset;
 *  - on a transition into TARGET_HALTED, update the sibling cores via
 *    esp_xtensa_smp_update_halt_gdb(), fire HALTED/DEBUG_HALTED events, and
 *    resume if a sibling requested it. */
127 int esp_xtensa_smp_poll(struct target *target)
129 enum target_state old_state = target->state;
130 struct esp_xtensa_smp_common *esp_xtensa_smp = target_to_esp_xtensa_smp(target);
131 struct target_list *head;
133 bool other_core_resume_req = false;
135 if (target->state == TARGET_HALTED && target->smp && target->gdb_service && !target->gdb_service->target) {
136 target->gdb_service->target = get_halted_esp_xtensa_smp(target, target->gdb_service->core[1]);
137 LOG_INFO("Switch GDB target to '%s'", target_name(target->gdb_service->target));
138 if (esp_xtensa_smp->chip_ops->on_halt)
139 esp_xtensa_smp->chip_ops->on_halt(target);
140 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
144 int ret = esp_xtensa_poll(target);
/* Reset arms the "examine other cores" countdown; each subsequent poll in
 * RUNNING/HALTED state retries examination until all cores pass or the
 * countdown expires. */
149 if (target->state == TARGET_RESET) {
150 esp_xtensa_smp->examine_other_cores = ESP_XTENSA_SMP_EXAMINE_OTHER_CORES;
151 } else if (esp_xtensa_smp->examine_other_cores > 0 &&
152 (target->state == TARGET_RUNNING || target->state == TARGET_HALTED)) {
153 LOG_TARGET_DEBUG(target, "Check for unexamined cores after reset");
154 bool all_examined = true;
155 foreach_smp_target(head, target->smp_targets) {
159 if (!target_was_examined(curr)) {
160 if (target_examine_one(curr) != ERROR_OK) {
161 LOG_DEBUG("Failed to examine!");
162 all_examined = false;
167 esp_xtensa_smp->examine_other_cores = 0;
169 esp_xtensa_smp->examine_other_cores--;
173 if (old_state != TARGET_HALTED && target->state == TARGET_HALTED) {
175 ret = esp_xtensa_smp_update_halt_gdb(target, &other_core_resume_req);
179 /* Call any event callbacks that are applicable */
180 if (old_state == TARGET_DEBUG_RUNNING) {
181 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
183 /* check whether any core polled by esp_xtensa_smp_update_halt_gdb() requested resume */
184 if (target->smp && other_core_resume_req) {
185 /* Resume xtensa_resume will handle BREAK instruction. */
186 ret = target_resume(target, 1, 0, 1, 0);
187 if (ret != ERROR_OK) {
188 LOG_ERROR("Failed to resume target");
193 if (esp_xtensa_smp->chip_ops->on_halt)
194 esp_xtensa_smp->chip_ops->on_halt(target);
195 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
/* Called when this core has just halted: bring the rest of the SMP cluster
 * up to date. Ensures the GDB service has a bound target, polls every other
 * not-yet-halted sibling (the GDB-serving target last, because polling it
 * alerts GDB), and reports via *need_resume whether any sibling asked to be
 * resumed. Un-examined cores are simply marked HALTED without polling. */
202 static int esp_xtensa_smp_update_halt_gdb(struct target *target, bool *need_resume)
204 struct esp_xtensa_smp_common *esp_xtensa_smp;
205 struct target *gdb_target = NULL;
206 struct target_list *head;
210 *need_resume = false;
212 if (target->gdb_service && target->gdb_service->target)
213 LOG_DEBUG("GDB target '%s'", target_name(target->gdb_service->target));
/* First halt binds the GDB service to this core. */
215 if (target->gdb_service && target->gdb_service->core[0] == -1) {
216 target->gdb_service->target = target;
217 target->gdb_service->core[0] = target->coreid;
218 LOG_INFO("Set GDB target to '%s'", target_name(target));
221 if (target->gdb_service)
222 gdb_target = target->gdb_service->target;
224 /* due to smpbreak config other cores can also go to HALTED state */
225 foreach_smp_target(head, target->smp_targets) {
227 LOG_DEBUG("Check target '%s'", target_name(curr));
228 /* skip calling context */
231 if (!target_was_examined(curr)) {
232 curr->state = TARGET_HALTED;
235 /* skip targets that were already halted */
236 if (curr->state == TARGET_HALTED)
238 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
239 if (curr == gdb_target)
241 LOG_DEBUG("Poll target '%s'", target_name(curr));
243 esp_xtensa_smp = target_to_esp_xtensa_smp(curr);
244 /* avoid auto-resume after syscall, it will be done later */
245 esp_xtensa_smp->other_core_does_resume = true;
246 /* avoid recursion in esp_xtensa_smp_poll() */
248 if (esp_xtensa_smp->chip_ops->poll)
249 ret = esp_xtensa_smp->chip_ops->poll(curr);
251 ret = esp_xtensa_smp_poll(curr);
255 esp_xtensa_smp->other_core_does_resume = false;
258 /* after all targets were updated, poll the gdb serving target */
259 if (gdb_target && gdb_target != target) {
260 esp_xtensa_smp = target_to_esp_xtensa_smp(gdb_target);
261 if (esp_xtensa_smp->chip_ops->poll)
262 ret = esp_xtensa_smp->chip_ops->poll(gdb_target);
264 ret = esp_xtensa_smp_poll(gdb_target);
/* Save the current BreakIn/BreakOut (smpbreak) configuration into *smp_break
 * and disconnect this core from the break network (set it to 0). */
272 static inline int esp_xtensa_smp_smpbreak_disable(struct target *target, uint32_t *smp_break)
274 int res = xtensa_smpbreak_get(target, smp_break);
277 return xtensa_smpbreak_set(target, 0);
/* Restore a smpbreak configuration previously saved by
 * esp_xtensa_smp_smpbreak_disable(). */
280 static inline int esp_xtensa_smp_smpbreak_restore(struct target *target, uint32_t smp_break)
282 return xtensa_smpbreak_set(target, smp_break);
/* Resume every other examined, non-running core of the SMP cluster at its
 * current PC (current=1, address=0). Un-examined cores (disabled in
 * single-core mode) are skipped. */
285 static int esp_xtensa_smp_resume_cores(struct target *target,
286 int handle_breakpoints,
289 struct target_list *head;
292 LOG_TARGET_DEBUG(target, "begin");
294 foreach_smp_target(head, target->smp_targets) {
296 /* in single-core mode disabled core cannot be examined, but need to be resumed too*/
297 if ((curr != target) && (curr->state != TARGET_RUNNING) && target_was_examined(curr)) {
298 /* resume current address, not in SMP mode */
300 int res = esp_xtensa_smp_resume(curr, 1, 0, handle_breakpoints, debug_execution);
/* Resume this core (and, via esp_xtensa_smp_resume_cores(), its siblings).
 * The smpbreak network is temporarily disconnected around
 * xtensa_prepare_resume(), because stepping over a breakpoint/watchpoint
 * there would otherwise halt the other cores. If GDB is mid-way through an
 * SMP core switch (core[1] != -1) only a "fake" resume is performed so the
 * next poll can play the other core. */
309 int esp_xtensa_smp_resume(struct target *target,
311 target_addr_t address,
312 int handle_breakpoints,
318 xtensa_smpbreak_get(target, &smp_break);
319 LOG_TARGET_DEBUG(target, "smp_break=0x%" PRIx32, smp_break);
321 /* dummy resume for smp toggle in order to reduce gdb impact */
322 if ((target->smp) && (target->gdb_service) && (target->gdb_service->core[1] != -1)) {
323 /* simulate a start and halt of target */
324 target->gdb_service->target = NULL;
325 target->gdb_service->core[0] = target->gdb_service->core[1];
326 /* fake resume at next poll we play the target core[1], see poll*/
327 LOG_TARGET_DEBUG(target, "Fake resume");
328 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
332 /* xtensa_prepare_resume() can step over breakpoint/watchpoint and generate signals on BreakInOut circuit for
333 * other cores. So disconnect this core from BreakInOut circuit and do xtensa_prepare_resume(). */
334 res = esp_xtensa_smp_smpbreak_disable(target, &smp_break);
337 res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
338 /* restore configured BreakInOut signals config */
339 int ret = esp_xtensa_smp_smpbreak_restore(target, smp_break);
342 if (res != ERROR_OK) {
343 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
/* Unbind GDB from a specific core, then start the sibling cores first and
 * finally this one. */
348 if (target->gdb_service)
349 target->gdb_service->core[0] = -1;
350 res = esp_xtensa_smp_resume_cores(target, handle_breakpoints, debug_execution);
355 res = xtensa_do_resume(target);
356 if (res != ERROR_OK) {
357 LOG_TARGET_ERROR(target, "Failed to resume!");
361 target->debug_reason = DBG_REASON_NOTHALTED;
362 if (!debug_execution)
363 target->state = TARGET_RUNNING;
365 target->state = TARGET_DEBUG_RUNNING;
367 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
/* Single-step one core with the break network temporarily disconnected so
 * the step does not halt the sibling cores; the saved smpbreak configuration
 * is restored afterwards. On a successful step the chip-specific on_halt
 * hook runs and a HALTED event is fired. */
371 int esp_xtensa_smp_step(struct target *target,
373 target_addr_t address,
374 int handle_breakpoints)
377 uint32_t smp_break = 0;
378 struct esp_xtensa_smp_common *esp_xtensa_smp = target_to_esp_xtensa_smp(target);
381 res = esp_xtensa_smp_smpbreak_disable(target, &smp_break);
385 res = xtensa_step(target, current, address, handle_breakpoints);
387 if (res == ERROR_OK) {
388 if (esp_xtensa_smp->chip_ops->on_halt)
389 esp_xtensa_smp->chip_ops->on_halt(target);
390 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
394 int ret = esp_xtensa_smp_smpbreak_restore(target, smp_break);
/* Add a watchpoint on this core, then duplicate it on every other examined
 * SMP core so each core's watchpoint list stays consistent (see the inline
 * comment for why the high-level watchpoint_add() API is required). */
402 int esp_xtensa_smp_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
404 int res = xtensa_watchpoint_add(target, watchpoint);
411 struct target_list *head;
412 foreach_smp_target(head, target->smp_targets) {
413 struct target *curr = head->target;
414 if (curr == target || !target_was_examined(curr))
416 /* Need to use high level API here because every target for core contains list of watchpoints.
417 * GDB works with active core only, so we need to duplicate every watchpoint on other cores,
418 * otherwise watchpoint_free() on active core can fail if WP has been initially added on another core. */
420 res = watchpoint_add(curr, watchpoint->address, watchpoint->length,
421 watchpoint->rw, watchpoint->value, watchpoint->mask);
/* Remove a watchpoint from this core and from every other SMP core where it
 * was duplicated by esp_xtensa_smp_watchpoint_add(). */
429 int esp_xtensa_smp_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
431 int res = xtensa_watchpoint_remove(target, watchpoint);
438 struct target_list *head;
439 foreach_smp_target(head, target->smp_targets) {
440 struct target *curr = head->target;
443 /* see big comment in esp_xtensa_smp_watchpoint_add() */
445 watchpoint_remove(curr, watchpoint->address);
/* Initialize the SMP-specific arch info: delegate to the base
 * esp_xtensa_init_arch_info(), then record the chip-specific operations and
 * arm the post-reset "examine other cores" countdown. */
451 int esp_xtensa_smp_init_arch_info(struct target *target,
452 struct esp_xtensa_smp_common *esp_xtensa_smp,
453 struct xtensa_debug_module_config *dm_cfg,
454 const struct esp_xtensa_smp_chip_ops *chip_ops)
456 int ret = esp_xtensa_init_arch_info(target, &esp_xtensa_smp->esp_xtensa, dm_cfg);
459 esp_xtensa_smp->chip_ops = chip_ops;
460 esp_xtensa_smp->examine_other_cores = ESP_XTENSA_SMP_EXAMINE_OTHER_CORES;
/* Thin wrapper: SMP target init is identical to the single-core
 * esp_xtensa_target_init(). */
464 int esp_xtensa_smp_target_init(struct command_context *cmd_ctx, struct target *target)
466 return esp_xtensa_target_init(cmd_ctx, target);
/* "xtdef" command: with arguments on an SMP target, apply the core-type
 * setting to every core; otherwise forward to the current target only. */
469 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtdef)
471 struct target *target = get_current_target(CMD_CTX);
472 if (target->smp && CMD_ARGC > 0) {
473 struct target_list *head;
475 foreach_smp_target(head, target->smp_targets) {
477 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
478 target_to_xtensa(curr));
484 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
485 target_to_xtensa(target));
/* "xtopt" command: with arguments on an SMP target, apply the core option to
 * every core; otherwise forward to the current target only. */
488 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtopt)
490 struct target *target = get_current_target(CMD_CTX);
491 if (target->smp && CMD_ARGC > 0) {
492 struct target_list *head;
494 foreach_smp_target(head, target->smp_targets) {
496 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
497 target_to_xtensa(curr));
503 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
504 target_to_xtensa(target));
/* "xtmem" command: with arguments on an SMP target, apply the memory/cache
 * option to every core; otherwise forward to the current target only. */
507 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtmem)
509 struct target *target = get_current_target(CMD_CTX);
510 if (target->smp && CMD_ARGC > 0) {
511 struct target_list *head;
513 foreach_smp_target(head, target->smp_targets) {
515 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
516 target_to_xtensa(curr));
522 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
523 target_to_xtensa(target));
/* "xtmpu" command: with arguments on an SMP target, apply the MPU option to
 * every core; otherwise forward to the current target only. */
526 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtmpu)
528 struct target *target = get_current_target(CMD_CTX);
529 if (target->smp && CMD_ARGC > 0) {
530 struct target_list *head;
532 foreach_smp_target(head, target->smp_targets) {
534 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
535 target_to_xtensa(curr));
541 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
542 target_to_xtensa(target));
/* "xtmmu" command: with arguments on an SMP target, apply the MMU option to
 * every core; otherwise forward to the current target only. */
545 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtmmu)
547 struct target *target = get_current_target(CMD_CTX);
548 if (target->smp && CMD_ARGC > 0) {
549 struct target_list *head;
551 foreach_smp_target(head, target->smp_targets) {
553 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
554 target_to_xtensa(curr));
560 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
561 target_to_xtensa(target));
/* "xtreg"/"xtregs" commands: with arguments on an SMP target, apply the
 * register definition to every core; otherwise forward to the current
 * target only. */
564 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtreg)
566 struct target *target = get_current_target(CMD_CTX);
567 if (target->smp && CMD_ARGC > 0) {
568 struct target_list *head;
570 foreach_smp_target(head, target->smp_targets) {
572 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
573 target_to_xtensa(curr));
579 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
580 target_to_xtensa(target));
/* "xtregfmt" command: with arguments on an SMP target, apply the register
 * map format to every core; otherwise forward to the current target only. */
583 COMMAND_HANDLER(esp_xtensa_smp_cmd_xtregfmt)
585 struct target *target = get_current_target(CMD_CTX);
586 if (target->smp && CMD_ARGC > 0) {
587 struct target_list *head;
589 foreach_smp_target(head, target->smp_targets) {
591 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
592 target_to_xtensa(curr));
598 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
599 target_to_xtensa(target));
/* "set_permissive" command: with arguments on an SMP target, set permissive
 * mode on every core; otherwise forward to the current target only. */
602 COMMAND_HANDLER(esp_xtensa_smp_cmd_permissive_mode)
604 struct target *target = get_current_target(CMD_CTX);
605 if (target->smp && CMD_ARGC > 0) {
606 struct target_list *head;
608 foreach_smp_target(head, target->smp_targets) {
610 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
611 target_to_xtensa(curr));
617 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
618 target_to_xtensa(target));
/* "smpbreak" command: with arguments on an SMP target, configure the break
 * network on every core; otherwise forward to the current target only.
 * Note: xtensa_cmd_smpbreak_do takes the target itself, not its xtensa. */
621 COMMAND_HANDLER(esp_xtensa_smp_cmd_smpbreak)
623 struct target *target = get_current_target(CMD_CTX);
624 if (target->smp && CMD_ARGC > 0) {
625 struct target_list *head;
627 foreach_smp_target(head, target->smp_targets) {
629 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do, curr);
635 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do, target);
/* "maskisr" command: with arguments on an SMP target, set interrupt masking
 * at step on every core; otherwise forward to the current target only. */
638 COMMAND_HANDLER(esp_xtensa_smp_cmd_mask_interrupts)
640 struct target *target = get_current_target(CMD_CTX);
641 if (target->smp && CMD_ARGC > 0) {
642 struct target_list *head;
644 foreach_smp_target(head, target->smp_targets) {
646 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
647 target_to_xtensa(curr));
653 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
654 target_to_xtensa(target));
/* "perfmon_enable" command: with arguments on an SMP target, start the
 * performance counter on every core; otherwise forward to the current
 * target only. */
657 COMMAND_HANDLER(esp_xtensa_smp_cmd_perfmon_enable)
659 struct target *target = get_current_target(CMD_CTX);
660 if (target->smp && CMD_ARGC > 0) {
661 struct target_list *head;
663 foreach_smp_target(head, target->smp_targets) {
665 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
666 target_to_xtensa(curr));
672 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
673 target_to_xtensa(target));
/* "perfmon_dump" command: dump counters for each SMP core, printing a
 * "CPU%d:" banner per core, then fall back to the current target.
 * NOTE(review): the SMP guard condition is elided in this excerpt —
 * presumably if (target->smp); confirm against upstream. */
676 COMMAND_HANDLER(esp_xtensa_smp_cmd_perfmon_dump)
678 struct target *target = get_current_target(CMD_CTX);
680 struct target_list *head;
682 foreach_smp_target(head, target->smp_targets) {
684 LOG_INFO("CPU%d:", curr->coreid);
685 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
686 target_to_xtensa(curr));
692 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
693 target_to_xtensa(target));
/* "tracestart" command: start a TRAX trace on each SMP core, falling back to
 * the current target. NOTE(review): the SMP guard condition is elided in
 * this excerpt — confirm against upstream. */
696 COMMAND_HANDLER(esp_xtensa_smp_cmd_tracestart)
698 struct target *target = get_current_target(CMD_CTX);
700 struct target_list *head;
702 foreach_smp_target(head, target->smp_targets) {
704 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
705 target_to_xtensa(curr));
711 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
712 target_to_xtensa(target));
/* "tracestop" command: stop the running TRAX trace on each SMP core, falling
 * back to the current target. NOTE(review): the SMP guard condition is
 * elided in this excerpt — confirm against upstream. */
715 COMMAND_HANDLER(esp_xtensa_smp_cmd_tracestop)
717 struct target *target = get_current_target(CMD_CTX);
719 struct target_list *head;
721 foreach_smp_target(head, target->smp_targets) {
723 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
724 target_to_xtensa(curr));
730 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
731 target_to_xtensa(target));
/* "tracedump" command: dump each core's trace memory to its own output file.
 * Requires one filename argument per core; CMD_ARGV is indexed by coreid,
 * which is assumed to be assigned sequentially (0,1,2...). Falls back to the
 * current target with a single filename in the non-SMP case. */
734 COMMAND_HANDLER(esp_xtensa_smp_cmd_tracedump)
736 struct target *target = get_current_target(CMD_CTX);
738 struct target_list *head;
740 int32_t cores_max_id = 0;
741 /* assume that core IDs are assigned to SMP targets sequentially: 0,1,2... */
742 foreach_smp_target(head, target->smp_targets) {
744 if (cores_max_id < curr->coreid)
745 cores_max_id = curr->coreid;
747 if (CMD_ARGC < ((uint32_t)cores_max_id + 1)) {
749 "Need %d filenames to dump to as output!",
753 foreach_smp_target(head, target->smp_targets) {
755 int ret = CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
756 target_to_xtensa(curr), CMD_ARGV[curr->coreid]);
762 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
763 target_to_xtensa(target), CMD_ARGV[0]);
/* Registration table for the per-target "xtensa" subcommands; each entry
 * dispatches to the SMP-aware COMMAND_HANDLERs above so settings propagate
 * to all cores. Fix: help text for tracedump read "to a files" — corrected
 * to "to files". */
766 const struct command_registration esp_xtensa_smp_xtensa_command_handlers[] = {
769 .handler = esp_xtensa_smp_cmd_xtdef,
770 .mode = COMMAND_CONFIG,
771 .help = "Configure Xtensa core type",
776 .handler = esp_xtensa_smp_cmd_xtopt,
777 .mode = COMMAND_CONFIG,
778 .help = "Configure Xtensa core option",
779 .usage = "<name> <value>",
783 .handler = esp_xtensa_smp_cmd_xtmem,
784 .mode = COMMAND_CONFIG,
785 .help = "Configure Xtensa memory/cache option",
786 .usage = "<type> [parameters]",
790 .handler = esp_xtensa_smp_cmd_xtmmu,
791 .mode = COMMAND_CONFIG,
792 .help = "Configure Xtensa MMU option",
793 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
797 .handler = esp_xtensa_smp_cmd_xtmpu,
798 .mode = COMMAND_CONFIG,
799 .help = "Configure Xtensa MPU option",
800 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
804 .handler = esp_xtensa_smp_cmd_xtreg,
805 .mode = COMMAND_CONFIG,
806 .help = "Configure Xtensa register",
807 .usage = "<regname> <regnum>",
811 .handler = esp_xtensa_smp_cmd_xtreg,
812 .mode = COMMAND_CONFIG,
813 .help = "Configure number of Xtensa registers",
814 .usage = "<numregs>",
818 .handler = esp_xtensa_smp_cmd_xtregfmt,
819 .mode = COMMAND_CONFIG,
820 .help = "Configure format of Xtensa register map",
821 .usage = "<numgregs>",
824 .name = "set_permissive",
825 .handler = esp_xtensa_smp_cmd_permissive_mode,
827 .help = "When set to 1, enable Xtensa permissive mode (less client-side checks)",
832 .handler = esp_xtensa_smp_cmd_mask_interrupts,
834 .help = "mask Xtensa interrupts at step",
835 .usage = "['on'|'off']",
839 .handler = esp_xtensa_smp_cmd_smpbreak,
841 .help = "Set the way the CPU chains OCD breaks",
843 "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
846 .name = "perfmon_enable",
847 .handler = esp_xtensa_smp_cmd_perfmon_enable,
848 .mode = COMMAND_EXEC,
849 .help = "Enable and start performance counter",
850 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
853 .name = "perfmon_dump",
854 .handler = esp_xtensa_smp_cmd_perfmon_dump,
855 .mode = COMMAND_EXEC,
857 "Dump performance counter value. If no argument specified, dumps all counters.",
858 .usage = "[counter_id]",
861 .name = "tracestart",
862 .handler = esp_xtensa_smp_cmd_tracestart,
863 .mode = COMMAND_EXEC,
865 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
866 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
870 .handler = esp_xtensa_smp_cmd_tracestop,
871 .mode = COMMAND_EXEC,
872 .help = "Tracing: Stop current trace as started by the tracestart command",
877 .handler = esp_xtensa_smp_cmd_tracedump,
878 .mode = COMMAND_EXEC,
879 .help = "Tracing: Dump trace memory to files. One file per core.",
880 .usage = "<outfile1> <outfile2>",
882 COMMAND_REGISTRATION_DONE
/* Top-level command registration: chains the "xtensa" subcommand table above
 * into the target's command set. */
885 const struct command_registration esp_xtensa_smp_command_handlers[] = {
889 .chain = esp_xtensa_smp_xtensa_command_handlers,
891 COMMAND_REGISTRATION_DONE