50c758259275570bff84fbbd3a362ba7fae41c84
[fw/openocd] src/target/nds32_v3_common.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4  *   Copyright (C) 2013 Andes Technology                                   *
5  *   Hsiangkai Wang <hkwang@andestech.com>                                 *
6  ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "nds32_reg.h"
14 #include "nds32_disassembler.h"
15 #include "nds32.h"
16 #include "nds32_aice.h"
17 #include "nds32_v3_common.h"
18
19 static struct nds32_v3_common_callback *v3_common_callback;
20
21 static int nds32_v3_register_mapping(struct nds32 *nds32, int reg_no)
22 {
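        /* While the core is halted, the program counter is accessed through IR11
         * (the syscall-skipping code in nds32_v3_leave_debug_state() also
         * manipulates the PC via IR11), so map the generic PC register number
         * onto it; all other register numbers pass through unchanged. */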
23         if (reg_no == PC)
24                 return IR11;
25
26         return reg_no;
27 }
28
29 static int nds32_v3_get_debug_reason(struct nds32 *nds32, uint32_t *reason)
30 {
31         uint32_t edmsw;
32         struct aice_port_s *aice = target_to_aice(nds32->target);
33         aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
34
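        /* EDMSW[15:12] encodes the cause of the last debug-mode entry; report it
         * as the debug reason. */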
35         *reason = (edmsw >> 12) & 0x0F;
36
37         return ERROR_OK;
38 }
39
40 /**
41  * Save processor state.  This is called after a HALT instruction
42  * succeeds, and on other occasions when the processor enters debug
43  * mode (breakpoint, watchpoint, etc.).
44  */
45 static int nds32_v3_debug_entry(struct nds32 *nds32, bool enable_watchpoint)
46 {
47         LOG_DEBUG("nds32_v3_debug_entry");
48
49         enum target_state backup_state = nds32->target->state;
50         nds32->target->state = TARGET_HALTED;
51
52         if (nds32->init_arch_info_after_halted == false) {
53                 /* init architecture info according to config registers */
54                 CHECK_RETVAL(nds32_config(nds32));
55
56                 nds32->init_arch_info_after_halted = true;
57         }
58
59         /* REVISIT entire cache should already be invalid !!! */
60         register_cache_invalidate(nds32->core_cache);
61
62         /* deactivate all hardware breakpoints */
63         CHECK_RETVAL(v3_common_callback->deactivate_hardware_breakpoint(nds32->target));
64
65         if (enable_watchpoint)
66                 CHECK_RETVAL(v3_common_callback->deactivate_hardware_watchpoint(nds32->target));
67
68         struct breakpoint *syscall_break = &(nds32->syscall_break);
69         if (nds32->virtual_hosting) {
70                 if (syscall_break->is_set) {
71                         /* disable virtual hosting */
72
73                         /* remove breakpoint at syscall entry */
74                         target_remove_breakpoint(nds32->target, syscall_break);
75                         syscall_break->is_set = false;
76
77                         uint32_t value_pc;
78                         nds32_get_mapped_reg(nds32, PC, &value_pc);
79                         if (value_pc == syscall_break->address)
80                                 /* process syscall for virtual hosting */
81                                 nds32->hit_syscall = true;
82                 }
83         }
84
85         if (nds32_examine_debug_reason(nds32) != ERROR_OK) {
86                 nds32->target->state = backup_state;
87
88                 /* re-activate all hardware breakpoints & watchpoints */
89                 CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(nds32->target));
90
91                 if (enable_watchpoint)
92                         CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(nds32->target));
93
94                 return ERROR_FAIL;
95         }
96
97         /* Save registers. */
98         nds32_full_context(nds32);
99
100         /* check interrupt level */
101         v3_common_callback->check_interrupt_stack(nds32);
102
103         return ERROR_OK;
104 }
105
106 /**
107  * Restore processor state.
108  */
109 static int nds32_v3_leave_debug_state(struct nds32 *nds32, bool enable_watchpoint)
110 {
111         LOG_DEBUG("nds32_v3_leave_debug_state");
112
113         struct target *target = nds32->target;
114
115         /* activate all hardware breakpoints */
116         CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(target));
117
118         if (enable_watchpoint) {
119                 /* activate all watchpoints */
120                 CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(target));
121         }
122
123         /* restore interrupt stack */
124         v3_common_callback->restore_interrupt_stack(nds32);
125
126         /* REVISIT once we start caring about MMU and cache state,
127          * address it here ...
128          */
129
130         /* restore PSW, PC, and R0 ... after flushing any modified
131          * registers.
132          */
133         CHECK_RETVAL(nds32_restore_context(target));
134
135         if (nds32->virtual_hosting) {
136                 /* enable virtual hosting */
137                 uint32_t value_ir3;
138                 uint32_t entry_size;
139                 uint32_t syscall_address;
140
141                 /* get syscall entry address */
142                 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
143                 entry_size = 0x4 << (((value_ir3 >> 14) & 0x3) << 1);
144                 syscall_address = (value_ir3 & 0xFFFF0000) + entry_size * 8; /* The index of SYSCALL is 8 */
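                /* Decoding of IR3 above: bits [15:14] select the vector entry size
                 * (4 << (2 * field), i.e. 4, 16, 64 or 256 bytes) and bits [31:16]
                 * give the vector base; the SYSCALL vector is entry 8.  For example,
                 * with 4-byte entries and a base of 0x00010000, the syscall entry
                 * would be at 0x00010000 + 4 * 8 = 0x00010020 (illustrative values). */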
145
146                 if (nds32->hit_syscall) {
147                         /* Either single-step over the syscall entry (exit syscall)
148                          * or use the IRET sequence below to skip the syscall. */
149                         struct aice_port_s *aice = target_to_aice(target);
150                         uint32_t value_ir9;
151                         uint32_t value_ir6;
152                         uint32_t syscall_id;
153
154                         nds32_get_mapped_reg(nds32, IR6, &value_ir6);
155                         syscall_id = (value_ir6 >> 16) & 0x7FFF;
156
157                         if (syscall_id == NDS32_SYSCALL_EXIT) {
158                                 /* If the target hits the exit syscall, do not use IRET to skip the handler. */
159                                 aice_step(aice);
160                         } else {
161                                 /* use aice_read_register()/aice_write_register() to bypass the nds32 register cache */
162                                 uint32_t value_dimbr;
163                                 aice_read_debug_reg(aice, NDS_EDM_SR_DIMBR, &value_dimbr);
164                                 aice_write_register(aice, IR11, value_dimbr + 0xC);
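                                /* IR11 (the PC restored when leaving debug mode, see
                                 * nds32_v3_register_mapping()) now points at DIMBR + 0xC,
                                 * the fourth word of the debug instruction memory, i.e. the
                                 * second IRET in dim_inst[] below: the first IRET leaves
                                 * debug mode and lands on the second IRET, which returns
                                 * from the syscall exception to the adjusted IR9. */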
165
166                                 aice_read_register(aice, IR9, &value_ir9);
167                                 value_ir9 += 4; /* syscall is always 4 bytes */
168                                 aice_write_register(aice, IR9, value_ir9);
169
170                                 /* backup hardware breakpoint 0 */
171                                 uint32_t backup_bpa, backup_bpam, backup_bpc;
172                                 aice_read_debug_reg(aice, NDS_EDM_SR_BPA0, &backup_bpa);
173                                 aice_read_debug_reg(aice, NDS_EDM_SR_BPAM0, &backup_bpam);
174                                 aice_read_debug_reg(aice, NDS_EDM_SR_BPC0, &backup_bpc);
175
176                                 /* use hardware breakpoint 0 to stop cpu after skipping syscall */
177                                 aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, value_ir9);
178                                 aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, 0);
179                                 aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, 0xA);
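                                /* Reading of the setup above: BPA0 is loaded with the resume
                                 * address so the core re-enters debug mode right after the
                                 * skipped syscall; BPAM0 and BPC0 select the match mode
                                 * (their exact bit semantics are defined by the Andes EDM
                                 * specification). */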
180
181                                 /* Execute two IRETs:
182                                  * the first IRET quits debug mode,
183                                  * the second IRET quits the current syscall. */
184                                 uint32_t dim_inst[4] = {NOP, NOP, IRET, IRET};
185                                 aice_execute(aice, dim_inst, 4);
186
187                                 /* restore the original hardware breakpoint 0 settings */
188                                 aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, backup_bpa);
189                                 aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, backup_bpam);
190                                 aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, backup_bpc);
191                         }
192
193                         nds32->hit_syscall = false;
194                 }
195
196                 /* insert breakpoint at syscall entry */
197                 struct breakpoint *syscall_break = &(nds32->syscall_break);
198
199                 syscall_break->address = syscall_address;
200                 syscall_break->type = BKPT_SOFT;
201                 syscall_break->is_set = true;
202                 target_add_breakpoint(target, syscall_break);
203         }
204
205         return ERROR_OK;
206 }
207
208 static int nds32_v3_get_exception_address(struct nds32 *nds32,
209                 uint32_t *address, uint32_t reason)
210 {
211         LOG_DEBUG("nds32_v3_get_exception_address");
212
213         struct aice_port_s *aice = target_to_aice(nds32->target);
214         struct target *target = nds32->target;
215         uint32_t edmsw;
216         uint32_t edm_cfg;
217         uint32_t match_bits;
218         uint32_t match_count;
219         int32_t i;
220         static int32_t number_of_hard_break;
221         uint32_t bp_control;
222
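        /* EDM_CFG[2:0] holds the number of implemented hardware breakpoint sets
         * minus one; read it once and cache it in the static variable above. */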
223         if (number_of_hard_break == 0) {
224                 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
225                 number_of_hard_break = (edm_cfg & 0x7) + 1;
226         }
227
228         aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
229         /* clear matching bits (write-one-clear) */
230         aice_write_debug_reg(aice, NDS_EDM_SR_EDMSW, edmsw);
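        /* EDMSW[11:4] are the per-breakpoint match status bits; scan each
         * implemented breakpoint register set for the ones that fired. */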
231         match_bits = (edmsw >> 4) & 0xFF;
232         match_count = 0;
233         for (i = 0 ; i < number_of_hard_break ; i++) {
234                 if (match_bits & (1 << i)) {
235                         aice_read_debug_reg(aice, NDS_EDM_SR_BPA0 + i, address);
236                         match_count++;
237
238                         /* If the target hits multiple read/access watchpoints,
239                          * select the first one. */
240                         aice_read_debug_reg(aice, NDS_EDM_SR_BPC0 + i, &bp_control);
241                         if ((bp_control & 0x3) == 0x3) {
242                                 match_count = 1;
243                                 break;
244                         }
245                 }
246         }
247
248         if (match_count > 1) { /* multiple hits */
249                 *address = 0;
250                 return ERROR_OK;
251         } else if (match_count == 1) {
252                 uint32_t val_pc;
253                 uint32_t opcode;
254                 struct nds32_instruction instruction;
255                 struct watchpoint *wp;
256                 bool hit;
257
258                 nds32_get_mapped_reg(nds32, PC, &val_pc);
259
260                 if ((reason == NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE) ||
261                                 (reason == NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE)) {
262                         if (edmsw & 0x4) /* check EDMSW.IS_16BIT */
263                                 val_pc -= 2;
264                         else
265                                 val_pc -= 4;
266                 }
267
268                 nds32_read_opcode(nds32, val_pc, &opcode);
269                 nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);
270
271                 LOG_DEBUG("PC: 0x%08" PRIx32 ", access start: 0x%08" PRIx32 ", end: 0x%08" PRIx32,
272                                 val_pc, instruction.access_start, instruction.access_end);
273
274                 /* check if multiple hits in the access range */
275                 uint32_t in_range_watch_count = 0;
276                 for (wp = target->watchpoints; wp; wp = wp->next) {
277                         if ((instruction.access_start <= wp->address) &&
278                                         (wp->address < instruction.access_end))
279                                 in_range_watch_count++;
280                 }
281                 if (in_range_watch_count > 1) {
282                         /* Hit a load/store-multiple (LSMW) instruction. */
283                         *address = 0;
284                         return ERROR_OK;
285                 }
286
287                 /* rule out false matches */
288                 hit = false;
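                /* A watchpoint is considered hit if the reported address equals
                 * wp->address in every bit not covered by wp->mask and its
                 * [address, address + length) range overlaps the instruction's
                 * access range. */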
289                 for (wp = target->watchpoints; wp; wp = wp->next) {
290                         if (((*address ^ wp->address) & (~wp->mask)) == 0) {
291                                 uint32_t watch_start;
292                                 uint32_t watch_end;
293
294                                 watch_start = wp->address;
295                                 watch_end = wp->address + wp->length;
296
297                                 if ((watch_end <= instruction.access_start) ||
298                                                 (instruction.access_end <= watch_start))
299                                         continue;
300
301                                 hit = true;
302                                 break;
303                         }
304                 }
305
306                 if (hit)
307                         return ERROR_OK;
308                 else
309                         return ERROR_FAIL;
310         } else if (match_count == 0) {
311                 /* global stop is a precise exception */
312                 if ((reason == NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP) && nds32->global_stop) {
313                         /* parse instruction to get correct access address */
314                         uint32_t val_pc;
315                         uint32_t opcode;
316                         struct nds32_instruction instruction;
317
318                         nds32_get_mapped_reg(nds32, PC, &val_pc);
319                         nds32_read_opcode(nds32, val_pc, &opcode);
320                         nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);
321
322                         *address = instruction.access_start;
323
324                         return ERROR_OK;
325                 }
326         }
327
328         *address = 0xFFFFFFFF;
329         return ERROR_FAIL;
330 }
331
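/**
 * Register the callback table used by the common v3 code in this file.
 * A v3/v3m target driver is expected to install its hooks before any of the
 * common routines run, since v3_common_callback is dereferenced without a
 * NULL check.  A minimal sketch (illustrative only; the real tables live in
 * the v3/v3m target drivers, and the my_* handlers are hypothetical):
 *
 *   static struct nds32_v3_common_callback my_callbacks = {
 *           .activate_hardware_breakpoint = my_activate_hw_breakpoint,
 *           .deactivate_hardware_breakpoint = my_deactivate_hw_breakpoint,
 *           .activate_hardware_watchpoint = my_activate_hw_watchpoint,
 *           .deactivate_hardware_watchpoint = my_deactivate_hw_watchpoint,
 *           .check_interrupt_stack = my_check_interrupt_stack,
 *           .restore_interrupt_stack = my_restore_interrupt_stack,
 *   };
 *   nds32_v3_common_register_callback(&my_callbacks);
 */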
332 void nds32_v3_common_register_callback(struct nds32_v3_common_callback *callback)
333 {
334         v3_common_callback = callback;
335 }
336
337 /** target_type functions: */
338 /* target request support */
339 int nds32_v3_target_request_data(struct target *target,
340                 uint32_t size, uint8_t *buffer)
341 {
342         /* An AndesCore can use the DTR register to communicate with OpenOCD
343          * and output messages.
344          * The target data will be placed in the buffer.
345          * The format of DTR is as follows:
346          * DTR[31:16] => length, DTR[15:8] => size, DTR[7:0] => target_req_cmd
347          * target_req_cmd has three possible values:
348          *   TARGET_REQ_TRACEMSG
349          *   TARGET_REQ_DEBUGMSG
350          *   TARGET_REQ_DEBUGCHAR
351          * If size == 0, target_asciimsg() is to be called,
352          * otherwise target_hexmsg().
353          */
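        /* A future implementation might start by unpacking the DTR word roughly as
         * below (sketch only; how the DTR value is fetched from the target and how
         * target_asciimsg()/target_hexmsg() are then invoked is left out):
         *
         *   uint32_t dtr = ...;                      // value read from the target's DTR
         *   uint16_t length = (dtr >> 16) & 0xFFFF;  // DTR[31:16]
         *   uint8_t msg_size = (dtr >> 8) & 0xFF;    // DTR[15:8]
         *   uint8_t target_req_cmd = dtr & 0xFF;     // DTR[7:0]
         */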
354         LOG_WARNING("Not implemented: %s", __func__);
355
356         return ERROR_OK;
357 }
358
359 int nds32_v3_checksum_memory(struct target *target,
360                 target_addr_t address, uint32_t count, uint32_t *checksum)
361 {
362         LOG_WARNING("Not implemented: %s", __func__);
363
364         return ERROR_FAIL;
365 }
366
367 /**
368  * Find out which watchpoint was hit:
369  * get the exception address and compare it against the configured watchpoints.
370  */
371 int nds32_v3_hit_watchpoint(struct target *target,
372                 struct watchpoint **hit_watchpoint)
373 {
374         static struct watchpoint scan_all_watchpoint;
375
376         uint32_t exception_address;
377         struct watchpoint *wp;
378         struct nds32 *nds32 = target_to_nds32(target);
379
380         exception_address = nds32->watched_address;
381
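        /* nds32_v3_get_exception_address() leaves two sentinel values in
         * watched_address: 0xFFFFFFFF means the watched address could not be
         * determined, and 0 means several watchpoints may have matched, in which
         * case a placeholder watchpoint is reported so the caller scans them all. */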
382         if (exception_address == 0xFFFFFFFF)
383                 return ERROR_FAIL;
384
385         if (exception_address == 0) {
386                 scan_all_watchpoint.address = 0;
387                 scan_all_watchpoint.rw = WPT_WRITE;
388                 scan_all_watchpoint.next = 0;
389                 scan_all_watchpoint.unique_id = 0x5CA8;
390
391                 *hit_watchpoint = &scan_all_watchpoint;
392                 return ERROR_OK;
393         }
394
395         for (wp = target->watchpoints; wp; wp = wp->next) {
396                 if (((exception_address ^ wp->address) & (~wp->mask)) == 0) {
397                         *hit_watchpoint = wp;
398
399                         return ERROR_OK;
400                 }
401         }
402
403         return ERROR_FAIL;
404 }
405
406 int nds32_v3_target_create_common(struct target *target, struct nds32 *nds32)
407 {
408         nds32->register_map = nds32_v3_register_mapping;
409         nds32->get_debug_reason = nds32_v3_get_debug_reason;
410         nds32->enter_debug_state = nds32_v3_debug_entry;
411         nds32->leave_debug_state = nds32_v3_leave_debug_state;
412         nds32->get_watched_address = nds32_v3_get_exception_address;
413
414         /* target->arch_info is initialized in nds32_init_arch_info().
415          * After this, callers can use target_to_nds32() to get the nds32 object. */
416         nds32_init_arch_info(target, nds32);
417
418         return ERROR_OK;
419 }
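
/* Typical use of nds32_v3_target_create_common() from a v3 target driver's
 * .target_create handler, with a hypothetical container struct shown only to
 * illustrate the calling convention:
 *
 *   struct my_v3_target {
 *           struct nds32 nds32;
 *           // driver-specific fields ...
 *   };
 *
 *   struct my_v3_target *v3 = calloc(1, sizeof(*v3));
 *   if (!v3)
 *           return ERROR_FAIL;
 *   return nds32_v3_target_create_common(target, &v3->nds32);
 */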
420
421 int nds32_v3_run_algorithm(struct target *target,
422                 int num_mem_params,
423                 struct mem_param *mem_params,
424                 int num_reg_params,
425                 struct reg_param *reg_params,
426                 target_addr_t entry_point,
427                 target_addr_t exit_point,
428                 int timeout_ms,
429                 void *arch_info)
430 {
431         LOG_WARNING("Not implemented: %s", __func__);
432
433         return ERROR_FAIL;
434 }
435
436 int nds32_v3_read_buffer(struct target *target, target_addr_t address,
437                 uint32_t size, uint8_t *buffer)
438 {
439         struct nds32 *nds32 = target_to_nds32(target);
440         struct nds32_memory *memory = &(nds32->memory);
441
442         if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
443                         (target->state != TARGET_HALTED)) {
444                 LOG_WARNING("target was not halted");
445                 return ERROR_TARGET_NOT_HALTED;
446         }
447
448         target_addr_t physical_address;
449         /* BUG: If the access range crosses a page boundary, the translation below is
450          * not correct for the second and subsequent pages. */
451
452         /* When DEX is set to one, hardware will enforce the following behavior without
453          * modifying the corresponding control bits in PSW.
454          *
455          * Disable all interrupts
456          * Become superuser mode
457          * Turn off IT/DT
458          * Use MMU_CFG.DE as the data access endian
459          * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
460          * Disable audio special features
461          * Disable inline function call
462          *
463          * Because the hardware turns off IT/DT by default, OpenOCD MUST translate the
464          * virtual address to a physical address.
465          */
466         if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
467                 address = physical_address;
468         else
469                 return ERROR_FAIL;
470
471         int result;
472         struct aice_port_s *aice = target_to_aice(target);
473         /* give arbitrary initial value to avoid warning messages */
474         enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;
475
476         if (nds32->hit_syscall) {
477                 /* Use bus mode to access memory during virtual hosting */
478                 origin_access_channel = memory->access_channel;
479                 memory->access_channel = NDS_MEMORY_ACC_BUS;
480                 aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
481         }
482
483         result = nds32_read_buffer(target, address, size, buffer);
484
485         if (nds32->hit_syscall) {
486                 /* Restore access_channel after virtual hosting */
487                 memory->access_channel = origin_access_channel;
488                 aice_memory_access(aice, origin_access_channel);
489         }
490
491         return result;
492 }
493
494 int nds32_v3_write_buffer(struct target *target, target_addr_t address,
495                 uint32_t size, const uint8_t *buffer)
496 {
497         struct nds32 *nds32 = target_to_nds32(target);
498         struct nds32_memory *memory = &(nds32->memory);
499
500         if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
501                         (target->state != TARGET_HALTED)) {
502                 LOG_WARNING("target was not halted");
503                 return ERROR_TARGET_NOT_HALTED;
504         }
505
506         target_addr_t physical_address;
507         /* BUG: If the access range crosses a page boundary, the translation below is
508          * not correct for the second and subsequent pages. */
509
510         /* When DEX is set to one, hardware will enforce the following behavior without
511          * modifying the corresponding control bits in PSW.
512          *
513          * Disable all interrupts
514          * Become superuser mode
515          * Turn off IT/DT
516          * Use MMU_CFG.DE as the data access endian
517          * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
518          * Disable audio special features
519          * Disable inline function call
520          *
521          * Because the hardware turns off IT/DT by default, OpenOCD MUST translate the
522          * virtual address to a physical address.
523          */
524         if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
525                 address = physical_address;
526         else
527                 return ERROR_FAIL;
528
529         if (nds32->hit_syscall) {
530                 struct aice_port_s *aice = target_to_aice(target);
531                 enum nds_memory_access origin_access_channel;
532                 origin_access_channel = memory->access_channel;
533
534                 /* If target has no cache, use BUS mode to access memory. */
535                 if ((memory->dcache.line_size == 0)
536                         || (memory->dcache.enable == false)) {
537                         /* There is no Dcache or Dcache is disabled. */
538                         memory->access_channel = NDS_MEMORY_ACC_BUS;
539                         aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
540                 }
541
542                 int result;
543                 result = nds32_gdb_fileio_write_memory(nds32, address, size, buffer);
544
545                 if (origin_access_channel == NDS_MEMORY_ACC_CPU) {
546                         memory->access_channel = NDS_MEMORY_ACC_CPU;
547                         aice_memory_access(aice, NDS_MEMORY_ACC_CPU);
548                 }
549
550                 return result;
551         }
552
553         return nds32_write_buffer(target, address, size, buffer);
554 }
555
556 int nds32_v3_read_memory(struct target *target, target_addr_t address,
557                 uint32_t size, uint32_t count, uint8_t *buffer)
558 {
559         struct nds32 *nds32 = target_to_nds32(target);
560         struct nds32_memory *memory = &(nds32->memory);
561
562         if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
563                         (target->state != TARGET_HALTED)) {
564                 LOG_WARNING("target was not halted");
565                 return ERROR_TARGET_NOT_HALTED;
566         }
567
568         target_addr_t physical_address;
569         /* BUG: If the access range crosses a page boundary, the translation below is
570          * not correct for the second and subsequent pages. */
571
572         /* When DEX is set to one, hardware will enforce the following behavior without
573          * modifying the corresponding control bits in PSW.
574          *
575          * Disable all interrupts
576          * Become superuser mode
577          * Turn off IT/DT
578          * Use MMU_CFG.DE as the data access endian
579          * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
580          * Disable audio special features
581          * Disable inline function call
582          *
583          * Because the hardware turns off IT/DT by default, OpenOCD MUST translate the
584          * virtual address to a physical address.
585          */
586         if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
587                 address = physical_address;
588         else
589                 return ERROR_FAIL;
590
591         struct aice_port_s *aice = target_to_aice(target);
592         /* give arbitrary initial value to avoid warning messages */
593         enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;
594         int result;
595
596         if (nds32->hit_syscall) {
597                 /* Use bus mode to access memory during virtual hosting */
598                 origin_access_channel = memory->access_channel;
599                 memory->access_channel = NDS_MEMORY_ACC_BUS;
600                 aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
601         }
602
603         result = nds32_read_memory(target, address, size, count, buffer);
604
605         if (nds32->hit_syscall) {
606                 /* Restore access_channel after virtual hosting */
607                 memory->access_channel = origin_access_channel;
608                 aice_memory_access(aice, origin_access_channel);
609         }
610
611         return result;
612 }
613
614 int nds32_v3_write_memory(struct target *target, target_addr_t address,
615                 uint32_t size, uint32_t count, const uint8_t *buffer)
616 {
617         struct nds32 *nds32 = target_to_nds32(target);
618         struct nds32_memory *memory = &(nds32->memory);
619
620         if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
621                         (target->state != TARGET_HALTED)) {
622                 LOG_WARNING("target was not halted");
623                 return ERROR_TARGET_NOT_HALTED;
624         }
625
626         target_addr_t physical_address;
627         /* BUG: If the access range crosses a page boundary, the translation below is
628          * not correct for the second and subsequent pages. */
629
630         /* When DEX is set to one, hardware will enforce the following behavior without
631          * modifying the corresponding control bits in PSW.
632          *
633          * Disable all interrupts
634          * Become superuser mode
635          * Turn off IT/DT
636          * Use MMU_CFG.DE as the data access endian
637          * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
638          * Disable audio special features
639          * Disable inline function call
640          *
641          * Because the hardware turns off IT/DT by default, OpenOCD MUST translate the
642          * virtual address to a physical address.
643          */
644         if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
645                 address = physical_address;
646         else
647                 return ERROR_FAIL;
648
649         return nds32_write_memory(target, address, size, count, buffer);
650 }
651
652 int nds32_v3_init_target(struct command_context *cmd_ctx,
653                 struct target *target)
654 {
655         /* Initialize anything we can set up without talking to the target */
656         struct nds32 *nds32 = target_to_nds32(target);
657
658         nds32_init(nds32);
659
660         target->fileio_info = malloc(sizeof(struct gdb_fileio_info));
661         target->fileio_info->identifier = NULL;
662
663         return ERROR_OK;
664 }