linux/arch/arm/mm/proc-arm922.S
/*
 *  linux/arch/arm/mm/proc-arm922.S: MMU functions for ARM922
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2001 Altera Corporation
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the arm922.
 *
 *  CONFIG_CPU_ARM922_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	4

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.  (I think this should
 * be 32768).
 */
#define CACHE_DLIMIT	8192
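
/*
 * Sanity check on the geometry above: 4 segments x 64 entries x
 * 32-byte lines = 8192 bytes, i.e. CACHE_DLIMIT corresponds to one
 * full D-cache (half the size of the ARM920T's, which is why there
 * are 4 segments here rather than 8).
 */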


	.text
/*
 * cpu_arm922_proc_init()
 */
ENTRY(cpu_arm922_proc_init)
	ret	lr

/*
 * cpu_arm922_proc_fin()
 */
ENTRY(cpu_arm922_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm922_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm922_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm922_reset)
	.popsection
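
/*
 * Note that the reset code sits in .idmap.text: clearing the low
 * nibble of the control register above turns the MMU off, so these
 * instructions must be identity-mapped for the instruction stream to
 * survive the transition before branching to the location in r0.
 */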

/*
 * cpu_arm922_do_idle()
 */
	.align	5
ENTRY(cpu_arm922_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr


#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 *	flush_icache_all()
 *
 *	Unconditionally invalidate the entire icache.
 */
ENTRY(arm922_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm922_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(arm922_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm922_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
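
/*
 * Index-op encoding used by the loop above (per the ARM922T TRM):
 * the line index goes in bits [31:26] of the c7, c14, 2 operand and
 * the segment in bits [6:5], hence the strides of 1 << 26 and 1 << 5.
 */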

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start - start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm922_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
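
/*
 * Worked example of the CACHE_DLIMIT cut-off: flushing one 4 KB page
 * costs 4096 / 32 = 128 line operations, well under the 4 * 64 = 256
 * index operations of __flush_whole_cache, so it stays in the
 * per-line loop; from 8 KB upwards the per-line cost reaches 256 or
 * more, and the whole-cache path wins.
 */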

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
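
/*
 * Both coherent entry points share the loop above: each line in
 * [start, end) is cleaned from the D-cache, then invalidated from
 * the I-cache, and the write buffer is drained at the end.  The
 * final mov r0, #0 reports success; callers of the user variant
 * (e.g. the cacheflush syscall path) propagate this return value.
 */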

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm922_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm922_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
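
/*
 * Example of the partial-line handling above: invalidating
 * 0x1008..0x2000 first cleans the line at 0x1000, since bytes
 * 0x1000-0x1007 fall outside the range and any dirty data there
 * would otherwise be discarded with the rest of the line; a
 * misaligned end address is handled the same way.
 */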

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm922_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm922_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm922_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm922_dma_clean_range
	bcs	arm922_dma_inv_range
	b	arm922_dma_flush_range
ENDPROC(arm922_dma_map_area)
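
/*
 * The dispatch above leans on the DMA direction encoding
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * the cmp takes the beq for DMA_TO_DEVICE (clean only), the bcs for
 * DMA_FROM_DEVICE (invalidate only), and falls through to a full
 * clean+invalidate for DMA_BIDIRECTIONAL.
 */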

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm922_dma_unmap_area)
	ret	lr
ENDPROC(arm922_dma_unmap_area)

	.globl	arm922_flush_kern_cache_louis
	.equ	arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm922
#endif


ENTRY(cpu_arm922_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm922_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm922_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
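
/*
 * The ordering above matters on a VIVT cache: dirty lines belonging
 * to the outgoing address space must be cleaned and the write buffer
 * drained while the old page tables are still live; only then is the
 * new translation table base loaded and the TLBs invalidated.
 */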

/*
 * cpu_arm922_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	ret	lr

	.type	__arm922_setup, #function
__arm922_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm922_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__arm922_setup, . - __arm922_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 *
	 */
	.type	arm922_crval, #object
arm922_crval:
	crval	clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
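
/*
 * crval (see proc-macros.S) packs three masks: "clear" lists the
 * control register bits zeroed during setup, "mmuset" the bits set
 * for MMU kernels, and "ucset" the bits set on !CONFIG_MMU builds.
 * Read against the chart above, mmuset = 0x3135 enables M (MMU),
 * C (D-cache), P, D, S, I (I-cache) and V (high vectors).
 */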

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm922_name, "ARM922T"

	.align

	.section ".proc.info.init", #alloc

	.type	__arm922_proc_info,#object
__arm922_proc_info:
	.long	0x41009220
	.long	0xff00fff0
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm922_setup, __arm922_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm922_name
	.long	arm922_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm922_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm922_proc_info, . - __arm922_proc_info
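
/*
 * The first two words of __arm922_proc_info are the CPU ID match
 * value and mask: boot code masks the main ID register with
 * 0xff00fff0 and compares the result against 0x41009220 (implementer
 * 0x41 = ARM, part number 0x922) to select this record from the
 * .proc.info.init table.
 */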