/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:     Nicolas Pitre
 *  Created:    November 2000
 *  Copyright:  (C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *      some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *      Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *      Completely revisited, many important fixes
 *      Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE   32768

/*
 * The cache line size of the I and D caches.
 */
#define CACHELINESIZE   32

/*
 * The size of the data cache.
 */
#define CACHESIZE       32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  In practice
 * we don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the 2 areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no
 * one knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR      0xfffe0000

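/*
 * Concretely: with CLEAN_ADDR = 0xfffe0000 and CACHESIZE = 0x8000,
 * toggling bit 15 (the "eor" with CACHESIZE in clean_d_cache below)
 * switches the allocation area between 0xfffe0000-0xfffe7fff and
 * 0xfffe8000-0xfffeffff, both safely below the vector table at
 * 0xffff0000.
 */
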
/*
 * This macro is used to wait for a CP15 write and is needed whenever we
 * must ensure that the last operation on the coprocessor has completed
 * before continuing.
 */
        .macro  cpwait, rd
        mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
        mov     \rd, \rd                        @ wait for completion
        sub     pc, pc, #4                      @ flush instruction pipeline
        .endm

        .macro  cpwait_ret, lr, rd
        mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
        sub     pc, \lr, \rd, LSR #32           @ wait for completion and
                                                @ flush instruction pipeline
        .endm
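
/*
 * Typical usage (an illustrative sketch, not a sequence taken from
 * this file):
 *
 *      mcr     p15, 0, r0, c2, c0, 0           @ e.g. set translation
 *                                              @ table base
 *      cpwait  ip                              @ wait before relying on it
 *
 * cpwait_ret folds the same wait into a function return; see
 * cpu_xscale_switch_mm below for a real user.
 */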

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
        .macro  clean_d_cache, rd, rs
        ldr     \rs, =clean_addr
        ldr     \rd, [\rs]
        eor     \rd, \rd, #CACHESIZE
        str     \rd, [\rs]
        add     \rs, \rd, #CACHESIZE
1:      mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        teq     \rd, \rs
        bne     1b
        .endm
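
/*
 * Loop math: CACHESIZE / CACHELINESIZE = 32768 / 32 = 1024 line
 * allocations per clean, performed four at a time, i.e. 256 iterations
 * of the unrolled loop above.
 */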

        .data
        .align  2
clean_addr:     .word   CLEAN_ADDR

        .text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
        @ enable write buffer coalescing. Some bootloaders disable it
        mrc     p15, 0, r1, c1, c0, 1
        bic     r1, r1, #1
        mcr     p15, 0, r1, c1, c0, 1
        ret     lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
        mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
        bic     r0, r0, #0x1800                 @ ...IZ...........
        bic     r0, r0, #0x0006                 @ .............CA.
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        ret     lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
        .align  5
        .pushsection    .idmap.text, "ax"
ENTRY(cpu_xscale_reset)
        mov     r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
        msr     cpsr_c, r1                      @ reset CPSR
        mcr     p15, 0, r1, c10, c4, 1          @ unlock I-TLB
        mcr     p15, 0, r1, c8, c5, 0           @ invalidate I-TLB
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x0086                 @ ........B....CA.
        bic     r1, r1, #0x3900                 @ ..VIZ..S........
        sub     pc, pc, #4                      @ flush pipeline
        @ *** cache line aligned ***
        mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x0001                 @ ...............M
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches & BTB
        mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
        @ CAUTION: MMU turned off from this point.  We count on the
        @ pipeline already containing the last two instructions to survive.
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        ret     r0
ENDPROC(cpu_xscale_reset)
        .popsection

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we always enter idle mode, whatever the requested state.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
        .align  5

ENTRY(cpu_xscale_do_idle)
        mov     r0, #1
        mcr     p14, 0, r0, c7, c0, 0           @ Go to IDLE
        ret     lr

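/*
 * The value 1 written to the CP14 power-mode register (c7) requests
 * idle mode; 0 is the active (run) state.  Deeper power modes exist
 * but their encodings are SoC-specific, which is why only idle is
 * used here.
 */
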
/* ================================= CACHE ================================ */

/*
 *      flush_icache_all()
 *
 *      Unconditionally invalidate the entire icache.
 */
ENTRY(xscale_flush_icache_all)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        ret     lr
ENDPROC(xscale_flush_icache_all)

/*
 *      flush_user_cache_all()
 *
 *      Invalidate all cache entries in a particular address
 *      space.
 */
ENTRY(xscale_flush_user_cache_all)
        /* FALLTHROUGH */

/*
 *      flush_kern_cache_all()
 *
 *      Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
__flush_whole_cache:
        clean_d_cache r0, r1
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ Invalidate I cache & BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      flush_user_cache_range(start, end, vm_flags)
 *
 *      Invalidate a range of cache entries in the specified
 *      address space.
 *
 *      - start    - start address (may not be aligned)
 *      - end      - end address (exclusive, may not be aligned)
 *      - vm_flags - vma->vm_flags for the address space
 */
        .align  5
ENTRY(xscale_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #MAX_AREA_SIZE
        bhs     __flush_whole_cache

1:      tst     r2, #VM_EXEC
        mcrne   p15, 0, r0, c7, c5, 1           @ Invalidate I cache line
        mcr     p15, 0, r0, c7, c10, 1          @ Clean D cache line
        mcr     p15, 0, r0, c7, c6, 1           @ Invalidate D cache line
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 6           @ Invalidate BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

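/*
 * Threshold rationale: below MAX_AREA_SIZE (32 KiB, the D-cache size)
 * the loop above touches at most 1024 lines; past that point a whole
 * cache clean via line allocation is cheaper than walking the range.
 */
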
/*
 *      coherent_kern_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start and end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 *
 *      Note: single I-cache line invalidation isn't used here since
 *      it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      coherent_user_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start and end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
ENTRY(xscale_coherent_user_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c5, 1           @ Invalidate I cache entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 6           @ Invalidate BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      flush_kern_dcache_area(void *addr, size_t size)
 *
 *      Ensure no D cache aliasing occurs, either with itself or
 *      the I cache
 *
 *      - addr  - kernel address
 *      - size  - region size
 */
ENTRY(xscale_flush_kern_dcache_area)
        add     r1, r0, r1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      dma_inv_range(start, end)
 *
 *      Invalidate (discard) the specified virtual address range.
 *      May not write back any entries.  If 'start' or 'end'
 *      are not cache line aligned, those lines must be written
 *      back.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
xscale_dma_inv_range:
        tst     r0, #CACHELINESIZE - 1
        bic     r0, r0, #CACHELINESIZE - 1
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry
        tst     r1, #CACHELINESIZE - 1
        mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

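/*
 * Partial-line handling, concretely: for start = 0x1010 the line at
 * 0x1000 also holds bytes outside the range, so it is cleaned (written
 * back) before the loop discards it; an unaligned 'end' is handled the
 * same way.
 */
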
/*
 *      dma_clean_range(start, end)
 *
 *      Clean the specified virtual address range.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
xscale_dma_clean_range:
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      dma_flush_range(start, end)
 *
 *      Clean and invalidate the specified virtual address range.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
ENTRY(xscale_dma_flush_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
ENTRY(xscale_dma_map_area)
        add     r1, r1, r0
        cmp     r2, #DMA_TO_DEVICE
        beq     xscale_dma_clean_range
        bcs     xscale_dma_inv_range
        b       xscale_dma_flush_range
ENDPROC(xscale_dma_map_area)

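/*
 * The dispatch above relies on the dma_data_direction encoding from
 * <linux/dma-direction.h>: DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
 * DMA_FROM_DEVICE = 2.  After "cmp r2, #DMA_TO_DEVICE", beq takes the
 * clean path (dir == 1), bcs takes the invalidate path (dir == 2), and
 * the fall-through flushes for DMA_BIDIRECTIONAL (dir == 0).
 */
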
/*
 *      dma_map_area(start, size, dir), 80200 A0/A1 stepping variant
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 *
 *      On these steppings any direction other than DMA_TO_DEVICE must
 *      take the clean+invalidate path; see the erratum #25 note below.
 */
ENTRY(xscale_80200_A0_A1_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        beq     xscale_dma_clean_range
        b       xscale_dma_flush_range
ENDPROC(xscale_80200_A0_A1_dma_map_area)

/*
 *      dma_unmap_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
ENTRY(xscale_dma_unmap_area)
        ret     lr
ENDPROC(xscale_dma_unmap_area)

        .globl  xscale_flush_kern_cache_louis
        .equ    xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions xscale

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *     http://www.intel.com/design/iio/specupdt/273415.htm
 */
.macro a0_alias basename
        .globl xscale_80200_A0_A1_\basename
        .type xscale_80200_A0_A1_\basename , %function
        .equ xscale_80200_A0_A1_\basename , xscale_\basename
.endm

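/*
 * For example, "a0_alias flush_icache_all" expands to
 *
 *      .globl xscale_80200_A0_A1_flush_icache_all
 *      .type xscale_80200_A0_A1_flush_icache_all , %function
 *      .equ xscale_80200_A0_A1_flush_icache_all , xscale_flush_icache_all
 *
 * i.e. the A0/A1 symbol is a plain alias for the generic routine.
 */
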
/*
 * Most of the cache functions are unchanged for these processor revisions.
 * Export suitable alias symbols for the unchanged functions:
 */
        a0_alias flush_icache_all
        a0_alias flush_user_cache_all
        a0_alias flush_kern_cache_all
        a0_alias flush_kern_cache_louis
        a0_alias flush_user_cache_range
        a0_alias coherent_kern_range
        a0_alias coherent_user_range
        a0_alias flush_kern_dcache_area
        a0_alias dma_flush_range
        a0_alias dma_unmap_area

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions xscale_80200_A0_A1

/*
 * cpu_xscale_dcache_clean_area(addr, size)
 *
 * Clean (write back) the given region of the D-cache one line at a
 * time; the size is effectively rounded up to a whole number of lines.
 */
ENTRY(cpu_xscale_dcache_clean_area)
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        subs    r1, r1, #CACHELINESIZE
        bhi     1b
        ret     lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
        .align  5
ENTRY(cpu_xscale_switch_mm)
        clean_d_cache r1, r2
        mcr     p15, 0, ip, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        cpwait_ret lr, ip

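/*
 * Ordering matters here: the D-cache is cleaned while the old page
 * tables are still live, the write buffer is drained, and only then is
 * the new translation table base installed and the TLBs invalidated.
 * cpwait_ret ensures the TLB invalidation has completed before
 * returning to code that may depend on the new mappings.
 */
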
/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
        .long   0x00                                            @ L_PTE_MT_UNCACHED
        .long   PTE_BUFFERABLE                                  @ L_PTE_MT_BUFFERABLE
        .long   PTE_CACHEABLE                                   @ L_PTE_MT_WRITETHROUGH
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_WRITEBACK
        .long   PTE_EXT_TEX(1) | PTE_BUFFERABLE                 @ L_PTE_MT_DEV_SHARED
        .long   0x00                                            @ unused
        .long   PTE_EXT_TEX(1) | PTE_CACHEABLE                  @ L_PTE_MT_MINICACHE
        .long   PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
        .long   0x00                                            @ unused
        .long   PTE_BUFFERABLE                                  @ L_PTE_MT_DEV_WC
        .long   0x00                                            @ unused
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_DEV_CACHED
        .long   0x00                                            @ L_PTE_MT_DEV_NONSHARED
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused

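/*
 * Indexing note: the L_PTE_MT_* values already occupy bits 2..5 of the
 * PTE, so the masked value doubles as a byte offset into this table of
 * 4-byte words -- hence the single "ldr ip, [ip, r1]" below with no
 * extra shift.
 */
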
        .align  5
ENTRY(cpu_xscale_set_pte_ext)
        xscale_set_pte_ext_prologue

        @
        @ Erratum 40: must set memory to write-through for user read-only
        @ pages.  Bit 4 is cleared from the mask so that the test below
        @ matches L_PTE_MT_WRITEALLOC as well as L_PTE_MT_WRITEBACK.
        @
        and     ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
        teq     ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY

        moveq   r1, #L_PTE_MT_WRITETHROUGH
        and     r1, r1, #L_PTE_MT_MASK
        adr     ip, cpu_xscale_mt_table
        ldr     ip, [ip, r1]
        bic     r2, r2, #0x0c
        orr     r2, r2, ip

        xscale_set_pte_ext_epilogue
        ret     lr

        .ltorg
        .align

.globl  cpu_xscale_suspend_size
.equ    cpu_xscale_suspend_size, 4 * 6
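
/*
 * 4 * 6 bytes matches the six words (r4-r9) that cpu_xscale_do_suspend
 * stores with "stmia r0, {r4 - r9}" below.
 */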
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
        mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
        mrc     p15, 0, r6, c13, c0, 0  @ PID
        mrc     p15, 0, r7, c3, c0, 0   @ domain ID
        mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r9, c1, c0, 0   @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
        stmia   r0, {r4 - r9}           @ store cp regs
        ldmfd   sp!, {r4 - r9, pc}
ENDPROC(cpu_xscale_do_suspend)

ENTRY(cpu_xscale_do_resume)
        ldmia   r0, {r4 - r9}           @ load cp regs
        mov     ip, #0
        mcr     p15, 0, ip, c8, c7, 0   @ invalidate I & D TLBs
        mcr     p15, 0, ip, c7, c7, 0   @ invalidate I & D caches, BTB
        mcr     p14, 0, r4, c6, c0, 0   @ clock configuration, turbo mode.
        mcr     p15, 0, r5, c15, c1, 0  @ CP access reg
        mcr     p15, 0, r6, c13, c0, 0  @ PID
        mcr     p15, 0, r7, c3, c0, 0   @ domain ID
        mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
        mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mov     r0, r9                  @ control register
        b       cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)
#endif

        .type   __xscale_setup, #function
__xscale_setup:
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I, D caches & BTB
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I, D TLBs
        mov     r0, #1 << 6                     @ cp6 for IOP3xx and Bulverde
        orr     r0, r0, #1 << 13                @ It's undefined whether this
        mcr     p15, 0, r0, c15, c1, 0          @ affects USR or SVC modes

        adr     r5, xscale_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0, 0           @ get control register
        bic     r0, r0, r5
        orr     r0, r0, r6
        ret     lr
        .size   __xscale_setup, . - __xscale_setup

        /*
         *  R
         * .RVI ZFRS BLDP WCAM
         * ..11 1.01 .... .101
         */
        .type   xscale_crval, #object
xscale_crval:
        crval   clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

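/*
 * Decoding mmuset = 0x3905 against the layout above: V, I, Z, S, C and
 * M are set (exception vectors high, I-cache, branch target buffer,
 * system protection, D-cache, MMU).  ucset = 0x1900 keeps only I, Z
 * and S for the MMU-less case.
 */
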
        __INITDATA

        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
        define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

        .section ".rodata"

        string  cpu_arch_name, "armv5te"
        string  cpu_elf_name, "v5"

        string  cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
        string  cpu_80200_name, "XScale-80200"
        string  cpu_80219_name, "XScale-80219"
        string  cpu_8032x_name, "XScale-IOP8032x Family"
        string  cpu_8033x_name, "XScale-IOP8033x Family"
        string  cpu_pxa250_name, "XScale-PXA250"
        string  cpu_pxa210_name, "XScale-PXA210"
        string  cpu_ixp42x_name, "XScale-IXP42x Family"
        string  cpu_ixp43x_name, "XScale-IXP43x Family"
        string  cpu_ixp46x_name, "XScale-IXP46x Family"
        string  cpu_ixp2400_name, "XScale-IXP2400"
        string  cpu_ixp2800_name, "XScale-IXP2800"
        string  cpu_pxa255_name, "XScale-PXA255"
        string  cpu_pxa270_name, "XScale-PXA270"

        .align

        .section ".proc.info.init", #alloc

.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
        .type   __\name\()_proc_info,#object
__\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        initfn  __xscale_setup, __\name\()_proc_info
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   \cpu_name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .ifb \cache
                .long   xscale_cache_fns
        .else
                .long   \cache
        .endif
        .size   __\name\()_proc_info, . - __\name\()_proc_info
.endm

        xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_A0_A1_name, \
                cache=xscale_80200_A0_A1_cache_fns
        xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
        xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
        xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
        xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
        xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
        xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
        xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
        xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
        xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
        xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
        xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
        xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
        xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name