arch: Added ARMv7R and Cortex-R4 support
[fw/openocd] src/target/armv7a.c
/***************************************************************************
 *   Copyright (C) 2009 by David Brownell                                  *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/replacements.h>

#include "armv7a.h"
#include "arm_disassembler.h"

#include "register.h"
#include <helper/binarybuffer.h>
#include <helper/command.h>

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "arm_opcodes.h"
#include "target.h"
#include "target_type.h"

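/* Dump the CP15 c5/c6 fault status and address registers (DFSR, IFSR, DFAR,
 * IFAR); called from armv7a_arch_state() when the core stopped in Abort mode,
 * so the user can see what faulted. */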
static void armv7a_show_fault_registers(struct target *target)
{
	uint32_t dfsr, ifsr, dfar, ifar;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return;

	/* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */

	/* c5/c0 - {data, instruction} fault status registers */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
			&dfsr);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
			&ifsr);
	if (retval != ERROR_OK)
		goto done;

	/* c6/c0 - {data, instruction} fault address registers */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
			&dfar);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
			&ifar);
	if (retval != ERROR_OK)
		goto done;

	LOG_USER("Data fault registers        DFSR: %8.8" PRIx32
		", DFAR: %8.8" PRIx32, dfsr, dfar);
	LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
		", IFAR: %8.8" PRIx32, ifsr, ifar);

done:
	/* (void) */ dpm->finish(dpm);
}

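/* Read TTBCR and cache the TTBR0/TTBR1 split.  TTBCR.N (bits [2:0]) gives the
 * number of upper address bits compared against zero to select TTBR0; a
 * non-zero N means TTBR1 is used for the upper part of the address space,
 * which is also taken here as the kernel/user ("os_border") boundary. */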
static int armv7a_read_ttbcr(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t ttbcr;
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	/*  MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
			&ttbcr);
	if (retval != ERROR_OK)
		goto done;
	armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
	armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - ((ttbcr & 0x7)));
#if 0
	LOG_INFO("ttb1 %s, ttb0_mask %x",
		armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
		armv7a->armv7a_mmu.ttbr0_mask);
#endif
	if (armv7a->armv7a_mmu.ttbr1_used == 1) {
		LOG_INFO("SVC access above %x",
			(0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
		armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
	} else {
		/* FIXME: default is the hard-coded Linux kernel/user border */
		armv7a->armv7a_mmu.os_border = 0xc0000000;
	}
done:
	dpm->finish(dpm);
	return retval;
}

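/* Classic ARMv4/v5-style two-level table walk performed by the debugger
 * itself: read TTBR, fetch the first-level descriptor (section, coarse or
 * fine page table), then if needed the second-level descriptor (large,
 * small or tiny page), and combine it with the page offset of the VA. */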
/*  method adapted to Cortex-A: reuses the ARMv4/v5 table walk */
int armv7a_mmu_translate_va(struct target *target,  uint32_t va, uint32_t *val)
{
	uint32_t first_lvl_descriptor = 0x0;
	uint32_t second_lvl_descriptor = 0x0;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t ttb = 0;	/*  default ttb0 */
	if (armv7a->armv7a_mmu.ttbr1_used == -1)
		armv7a_read_ttbcr(target);
	if ((armv7a->armv7a_mmu.ttbr1_used) &&
		(va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask))) {
		/*  select ttb 1 */
		ttb = 1;
	}
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/*  MRC p15,0,<Rt>,c2,c0,ttb ; read TTBR0 or TTBR1 into ttb */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
			&ttb);
	if (retval != ERROR_OK)
		goto done;
	retval = armv7a->armv7a_mmu.read_physical_memory(target,
			(ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
			4, 1, (uint8_t *)&first_lvl_descriptor);
	if (retval != ERROR_OK)
		goto done;
	first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
			&first_lvl_descriptor);
	/*  reuse armv4_5 piece of code, specific armv7a changes may come later */
	LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);

	if ((first_lvl_descriptor & 0x3) == 0) {
		LOG_ERROR("Address translation failure");
		retval = ERROR_TARGET_TRANSLATION_FAULT;
		goto done;
	}

	if ((first_lvl_descriptor & 0x3) == 2) {
		/* section descriptor */
		*val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
		goto done;
	}

	if ((first_lvl_descriptor & 0x3) == 1) {
		/* coarse page table */
		retval = armv7a->armv7a_mmu.read_physical_memory(target,
				(first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
				4, 1, (uint8_t *)&second_lvl_descriptor);
		if (retval != ERROR_OK)
			goto done;
	} else if ((first_lvl_descriptor & 0x3) == 3) {
		/* fine page table */
		retval = armv7a->armv7a_mmu.read_physical_memory(target,
				(first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
				4, 1, (uint8_t *)&second_lvl_descriptor);
		if (retval != ERROR_OK)
			goto done;
	}

	second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
			&second_lvl_descriptor);

	LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);

	if ((second_lvl_descriptor & 0x3) == 0) {
		LOG_ERROR("Address translation failure");
		retval = ERROR_TARGET_TRANSLATION_FAULT;
		goto done;
	}

	if ((second_lvl_descriptor & 0x3) == 1) {
		/* large page descriptor */
		*val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
		goto done;
	}

	if ((second_lvl_descriptor & 0x3) == 2) {
		/* small page descriptor */
		*val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
		goto done;
	}

	if ((second_lvl_descriptor & 0x3) == 3) {
		/* tiny page descriptor */
		*val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
		goto done;
	}

	/* should not happen */
	LOG_ERROR("Address translation failure");
	retval = ERROR_TARGET_TRANSLATION_FAULT;

done:
	dpm->finish(dpm);
	return retval;
}

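/* Hardware VA-to-PA translation: write the address to the ATS1CPR operation
 * (CP15 c7,c8,0 - stage 1, current state, privileged read) and read the
 * result back from PAR (CP15 c7,c4,0), including the memory attribute bits
 * decoded below.  Only meaningful while the MMU is enabled. */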
/*  V7 method VA TO PA  */
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
	uint32_t *val, int meminfo)
{
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t virt = va & ~0xfff;
	uint32_t NOS, NS, INNER, OUTER;
	*val = 0xdeadbeef;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	/*  the MMU must be enabled in order to get a correct translation;
	 *  use the VA to PA CP15 operation for the conversion */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
			virt);
	if (retval != ERROR_OK)
		goto done;
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
			val);
	/* decode memory attributes */
	NOS = (*val >> 10) & 1;	/* Not Outer Shareable */
	NS = (*val >> 9) & 1;	/* Non-Secure */
	INNER = (*val >> 4) & 0x7;
	OUTER = (*val >> 2) & 0x3;

	if (retval != ERROR_OK)
		goto done;
	*val = (*val & ~0xfff) + (va & 0xfff);
	if (*val == va)
		LOG_WARNING("virt = phys: MMU disabled!");
	if (meminfo) {
		LOG_INFO("%x : %x %s outer shareable %s secured",
			va, *val,
			NOS == 1 ? "not" : " ",
			NS == 1 ? "not" : "");
		switch (OUTER) {
			case 0:
				LOG_INFO("outer: Non-Cacheable");
				break;
			case 1:
				LOG_INFO("outer: Write-Back, Write-Allocate");
				break;
			case 2:
				LOG_INFO("outer: Write-Through, No Write-Allocate");
				break;
			case 3:
				LOG_INFO("outer: Write-Back, no Write-Allocate");
				break;
		}
		switch (INNER) {
			case 0:
				LOG_INFO("inner: Non-Cacheable");
				break;
			case 1:
				LOG_INFO("inner: Strongly-ordered");
				break;
			case 3:
				LOG_INFO("inner: Device");
				break;
			case 5:
				LOG_INFO("inner: Write-Back, Write-Allocate");
				break;
			case 6:
				LOG_INFO("inner: Write-Through");
				break;
			case 7:
				LOG_INFO("inner: Write-Back, no Write-Allocate");
				break;
			default:
				LOG_INFO("inner: %x ???", INNER);
		}
	}

done:
	dpm->finish(dpm);

	return retval;
}

static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
	struct armv7a_cache_common *armv7a_cache)
{
	if (armv7a_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	command_print(cmd_ctx,
		"D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
		armv7a_cache->d_u_size.linelen,
		armv7a_cache->d_u_size.associativity,
		armv7a_cache->d_u_size.nsets,
		armv7a_cache->d_u_size.cachesize);

	command_print(cmd_ctx,
		"I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
		armv7a_cache->i_size.linelen,
		armv7a_cache->i_size.associativity,
		armv7a_cache->i_size.nsets,
		armv7a_cache->i_size.cachesize);

	return ERROR_OK;
}

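/* Clean and invalidate the whole L1 data cache by set/way: for every set and
 * way, issue DCCISW (CP15 c7,c14,2) with the set index and way number packed
 * into the register value using the shifts computed in armv7a_identify_cache(). */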
static int _armv7a_flush_all_data(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	struct armv7a_cachesize *d_u_size =
		&(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
	int32_t c_way, c_index = d_u_size->index;
	int retval;
	/*  check that the data cache was enabled when the target halted */
	if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
		LOG_INFO("flush not performed: cache not enabled at target halt");
		return ERROR_OK;
	}
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	do {
		c_way = d_u_size->way;
		do {
			uint32_t value = (c_index << d_u_size->index_shift)
				| (c_way << d_u_size->way_shift);
			/*  DCCISW */
			/* LOG_INFO("%d %d %x", c_way, c_index, value); */
			retval = dpm->instr_write_data_r0(dpm,
					ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
					value);
			if (retval != ERROR_OK)
				goto done;
			c_way -= 1;
		} while (c_way >= 0);
		c_index -= 1;
	} while (c_index >= 0);
	dpm->finish(dpm);
	return retval;
done:
	LOG_ERROR("flush failed");
	dpm->finish(dpm);
	return retval;
}

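/* Flush L1 for this target, or for every halted core of an SMP group, so that
 * a subsequent L2 flush (armv7a_l2x_flush_all_data) sees consistent data. */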
static int armv7a_flush_all_data(struct target *target)
{
	int retval = ERROR_FAIL;
	/*  check that the armv7a cache has been correctly identified */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
		LOG_ERROR("trying to flush un-identified cache");
		return retval;
	}

	if (target->smp) {
		/*  check whether all the other targets have been flushed
		 *  in order to flush level 2 */
		struct target_list *head;
		struct target *curr;
		head = target->head;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			if (curr->state == TARGET_HALTED) {
				LOG_INFO("Flushing L1 data cache on core %d", curr->coreid);
				retval = _armv7a_flush_all_data(curr);
			}
			head = head->next;
		}
	} else
		retval = _armv7a_flush_all_data(target);
	return retval;
}
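/* L2 flush through the external cache controller: the 0x7FC offset matches the
 * "clean and invalidate by way" register of an L2C-310 (PL310) style controller;
 * writing a mask with one bit per way cleans and invalidates the whole cache. */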
/* L2 is not specific to ARMv7-A; a separate file is needed */
static int armv7a_l2x_flush_all_data(struct target *target)
{

#define L2X0_CLEAN_INV_WAY		0x7FC
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
		(armv7a->armv7a_mmu.armv7a_cache.l2_cache);
	uint32_t base = l2x_cache->base;
	uint32_t l2_way = l2x_cache->way;
	uint32_t l2_way_val = (1 << l2_way) - 1;
	retval = armv7a_flush_all_data(target);
	if (retval != ERROR_OK)
		return retval;
	retval = target->type->write_phys_memory(target,
			(uint32_t)(base + (uint32_t)L2X0_CLEAN_INV_WAY),
			(uint32_t)4,
			(uint32_t)1,
			(uint8_t *)&l2_way_val);
	return retval;
}

static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
	struct armv7a_cache_common *armv7a_cache)
{

	struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
		(armv7a_cache->l2_cache);

	if (armv7a_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	command_print(cmd_ctx,
		"L1 D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
		armv7a_cache->d_u_size.linelen,
		armv7a_cache->d_u_size.associativity,
		armv7a_cache->d_u_size.nsets,
		armv7a_cache->d_u_size.cachesize);

	command_print(cmd_ctx,
		"L1 I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
		armv7a_cache->i_size.linelen,
		armv7a_cache->i_size.associativity,
		armv7a_cache->i_size.nsets,
		armv7a_cache->i_size.cachesize);
	command_print(cmd_ctx, "L2 unified cache Base Address 0x%x, %d ways",
		l2x_cache->base, l2x_cache->way);

	return ERROR_OK;
}

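/* Record the external L2 controller geometry and switch the cache hooks to the
 * L2-aware flush/info handlers.  In an SMP configuration the same descriptor is
 * shared by every core of the cluster, so this must run after "target smp". */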
static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
{
	struct armv7a_l2x_cache *l2x_cache;
	struct target_list *head = target->head;
	struct target *curr;

	struct armv7a_common *armv7a = target_to_armv7a(target);
	l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
	l2x_cache->base = base;
	l2x_cache->way = way;
	/* LOG_INFO("cache l2 initialized base %x way %d",
	   l2x_cache->base, l2x_cache->way); */
	if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
		LOG_INFO("cache l2 already initialized");
	armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void *) l2x_cache;
	/*  initialize the l1 / l2x cache functions */
	armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
		= armv7a_l2x_flush_all_data;
	armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
		armv7a_handle_l2x_cache_info_command;
	/*  initialize all targets in this cluster (smp targets);
	 *  the l2 cache must be configured after the smp declaration */
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if (curr != target) {
			armv7a = target_to_armv7a(curr);
			if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
				LOG_ERROR("smp target: cache l2 already initialized");
			armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void *) l2x_cache;
			armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
				armv7a_l2x_flush_all_data;
			armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
				armv7a_handle_l2x_cache_info_command;
		}
		head = head->next;
	}
	return JIM_OK;
}

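/* "cache_config l2x <base_addr> <number_of_way>" - for example (values are
 * board-specific and shown only as an illustration):
 *   cache_config l2x 0xfff12000 8
 * would declare a PL310-style L2 controller at 0xfff12000 with 8 ways. */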
COMMAND_HANDLER(handle_cache_l2x)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t base, way;
	switch (CMD_ARGC) {
		case 0:
			return ERROR_COMMAND_SYNTAX_ERROR;
		case 2:
			/* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);

			/* configure the L2 cache controller for this (possibly SMP) target */
			armv7a_l2x_cache_init(target, base, way);
			break;
		default:
			return ERROR_COMMAND_SYNTAX_ERROR;
	}
	return ERROR_OK;
}

int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
	struct armv7a_cache_common *armv7a_cache)
{
	if (armv7a_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	if (armv7a_cache->display_cache_info)
		armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
	return ERROR_OK;
}

/*  retrieve core id and cluster id  */
static int armv7a_read_mpidr(struct target *target)
{
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t mpidr;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	/* MRC p15,0,<Rd>,c0,c0,5 ; read Multiprocessor Affinity (MPIDR) register */

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
			&mpidr);
	if (retval != ERROR_OK)
		goto done;
	if (mpidr & (1 << 31)) {
		armv7a->multi_processor_system = (mpidr >> 30) & 1;
		armv7a->cluster_id = (mpidr >> 8) & 0xf;
		armv7a->cpu_id = mpidr & 0x3;
		LOG_INFO("%s cluster %x core %x %s", target_name(target),
			armv7a->cluster_id,
			armv7a->cpu_id,
			armv7a->multi_processor_system == 0 ? "multi core" : "mono core");

	} else
		LOG_ERROR("MPIDR not in multiprocessor format");

done:
	dpm->finish(dpm);
	return retval;
}

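/* Identify the L1 caches from the CP15 identification registers: CLIDR
 * (number of cache levels), then CSSELR to select the instruction and data
 * caches in turn and CCSIDR to read line size, associativity and number of
 * sets for each.  The derived index/way shifts feed the set/way flush loop. */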
int armv7a_identify_cache(struct target *target)
{
	/*  read the cache descriptors */
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	uint32_t cache_selected, clidr;
	uint32_t cache_i_reg, cache_d_reg;
	struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);
	retval = dpm->prepare(dpm);

	if (retval != ERROR_OK)
		goto done;
	/*  retrieve CLIDR
	 *  mrc p15, 1, r0, c0, c0, 1		@ read clidr */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
			&clidr);
	if (retval != ERROR_OK)
		goto done;
	clidr = (clidr & 0x7000000) >> 23;
	LOG_INFO("number of cache levels %d", clidr / 2);
	if ((clidr / 2) > 1) {
		/* FIXME: not supported; an L2 cache is present in Cortex-A8
		 * and later parts such as Cortex-A7 and A15 */
		LOG_ERROR("L2 cache present: not supported");
	}
	/*  retrieve the currently selected cache
	 *  MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			&cache_selected);
	if (retval != ERROR_OK)
		goto done;

	retval = armv7a->arm.mrc(target, 15,
			2, 0,	/* op1, op2 */
			0, 0,	/* CRn, CRm */
			&cache_selected);
	if (retval != ERROR_OK)
		goto done;
	/* select the instruction cache
	 *  MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
	 *  [0] : 1 instruction cache selection, 0 data cache selection */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
			1);
	if (retval != ERROR_OK)
		goto done;

	/* read CCSIDR
	 * MRC p15,1,<Rt>,c0,c0,0 ; on Cortex-A9 read CCSIDR
	 * [2:0] line size  001 eight words per line
	 * [27:13] NumSets 0x7f 16KB, 0xff 32KBytes, 0x1ff 64KBytes */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
			&cache_i_reg);
	if (retval != ERROR_OK)
		goto done;

	/*  select the data cache */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
			0);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
			&cache_d_reg);
	if (retval != ERROR_OK)
		goto done;

	/*  restore the originally selected cache */
	retval = dpm->instr_write_data_r0(dpm,
		ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
		cache_selected);

	if (retval != ERROR_OK)
		goto done;

	/* decode the CCSIDR values */
	cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
	cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff) + 1) / 8;
	cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
	cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
	/*  compute info for set/way operations on the cache */
	cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
	cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
	cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
	cache->d_u_size.way_shift = cache->d_u_size.way + 1;
	{
		int i = 0;
		while (((cache->d_u_size.way_shift >> i) & 1) != 1)
			i++;
		cache->d_u_size.way_shift = 32 - i;
	}
#if 0
	LOG_INFO("data cache index %d << %d, way %d << %d",
			cache->d_u_size.index, cache->d_u_size.index_shift,
			cache->d_u_size.way,
			cache->d_u_size.way_shift);

	LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
			cache->d_u_size.linelen,
			cache->d_u_size.cachesize,
			cache->d_u_size.associativity);
#endif
	cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
	cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
	cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
	cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff) + 1) / 8;
	/*  compute info for set/way operations on the cache */
	cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
	cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
	cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
	cache->i_size.way_shift = cache->i_size.way + 1;
	{
		int i = 0;
		while (((cache->i_size.way_shift >> i) & 1) != 1)
			i++;
		cache->i_size.way_shift = 32 - i;
	}
#if 0
	LOG_INFO("instruction cache index %d << %d, way %d << %d",
			cache->i_size.index, cache->i_size.index_shift,
			cache->i_size.way, cache->i_size.way_shift);

	LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
			cache->i_size.linelen,
			cache->i_size.cachesize,
			cache->i_size.associativity);
#endif
	/*  if there is no l2 cache, initialize the l1 data cache flush function */
	if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
		armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
			armv7a_handle_inner_cache_info_command;
		armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
			armv7a_flush_all_data;
	}
	armv7a->armv7a_mmu.armv7a_cache.ctype = 0;

done:
	dpm->finish(dpm);
	armv7a_read_mpidr(target);
	return retval;

}

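/* Shared initialization for Cortex-A and Cortex-R targets: link the
 * arm/armv7a structures, set the magic numbers and reset the cache
 * description so that armv7a_identify_cache() can fill it in later. */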
int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
{
	struct arm *arm = &armv7a->arm;
	arm->arch_info = armv7a;
	target->arch_info = &armv7a->arm;
	/*  the target pointer is used by all ARMv4/v5-compatible functions */
	armv7a->arm.target = target;
	armv7a->arm.common_magic = ARM_COMMON_MAGIC;
	armv7a->common_magic = ARMV7_COMMON_MAGIC;
	armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
	armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
	armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
	armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
	return ERROR_OK;
}

int armv7a_arch_state(struct target *target)
{
	static const char *state[] = {
		"disabled", "enabled"
	};

	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-ARMv7A target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);

	/* ARMv7-R cores have no MMU, so only report the cache state for them */
	if (armv7a->is_armv7r) {
		LOG_USER("D-Cache: %s, I-Cache: %s",
			state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
			state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
	} else {
		LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
			state[armv7a->armv7a_mmu.mmu_enabled],
			state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
			state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
	}

	if (arm->core_mode == ARM_MODE_ABT)
		armv7a_show_fault_registers(target);
	if (target->debug_reason == DBG_REASON_WATCHPOINT)
		LOG_USER("Watchpoint triggered at PC %#08x",
			(unsigned) armv7a->dpm.wp_pc);

	return ERROR_OK;
}

static const struct command_registration l2_cache_commands[] = {
	{
		.name = "l2x",
		.handler = handle_cache_l2x,
		.mode = COMMAND_EXEC,
		.help = "configure l2x cache",
		.usage = "[base_addr] [number_of_way]",
	},
	COMMAND_REGISTRATION_DONE
};

const struct command_registration l2x_cache_command_handlers[] = {
	{
		.name = "cache_config",
		.mode = COMMAND_EXEC,
		.help = "cache configuration for a target",
		.usage = "",
		.chain = l2_cache_commands,
	},
	COMMAND_REGISTRATION_DONE
};


const struct command_registration armv7a_command_handlers[] = {
	{
		.chain = dap_command_handlers,
	},
	{
		.chain = l2x_cache_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};