/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifdef CONFIG_MMU

#include <asm/glue.h>

#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_BTB		(1 << 28)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE	(1 << 19)
#define TLB_V7_UIS_FULL	(1 << 20)
#define TLB_V7_UIS_ASID	(1 << 21)

/* Inner Shareable BTB operation (ARMv7 MP extensions) */
#define TLB_V7_IS_BTB	(1 << 22)

#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  fa    - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB))
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
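/*
 * Illustrative examples (not exhaustive): a Cortex-A8 kernel selects
 * CONFIG_CPU_TLB_V7 and therefore the v7wbi model, while an ARM926
 * kernel selects CONFIG_CPU_TLB_V4WBI and uses v4wbi.  Configuring
 * more than one model (or CONFIG_SMP_ON_UP) defines MULTI_TLB below,
 * and the TLB methods are then dispatched at run time via cpu_tlb.
 */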
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif

#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define fa_tlb_flags	(TLB_WB | TLB_BTB | TLB_DCLEAN | \
			 TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags	fa_tlb_flags
# define fa_always_flags	fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags	0
# define fa_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
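/* Feroceon deliberately reuses the v4wbi TLB flush methods */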
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
				 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
#  define v7wbi_possible_flags	(v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags	(v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags	v7wbi_tlb_flags_smp
#  define v7wbi_always_flags	v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags	v7wbi_tlb_flags_up
#  define v7wbi_always_flags	v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif
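/*
 * Illustrative expansion (assuming CONFIG_CPU_TLB_V6 is the only TLB
 * model configured): _TLB is v6wbi, so __cpu_flush_kern_tlb_range()
 * becomes a direct call to v6wbi_flush_kern_tlb_range(), implemented
 * in arch/arm/mm/tlb-v6.S.  With MULTI_TLB, the same call is instead
 * an indirect call through cpu_tlb.flush_kern_range, filled in at
 * boot from the matched processor description (see
 * arch/arm/kernel/setup.c).
 */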

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags			cpu_tlb.tlb_flags

/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */

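/*
 * Illustrative usage (a sketch; vma, addr, start and end are assumed
 * caller variables, not defined here): after updating page tables, a
 * caller invalidates stale entries through the interfaces documented
 * above, e.g.
 *
 *	flush_tlb_page(vma, addr);
 *	flush_tlb_range(vma, start, end);
 *	flush_tlb_kernel_range(start, end);
 *
 * flush_tlb_page() covers a single user page, flush_tlb_range() a
 * range within a vma, and flush_tlb_kernel_range() a range of kernel
 * mappings.
 */
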
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

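/*
 * Constant-folding example (illustrative): on a build where v6wbi is
 * the only possible model, always_tlb_flags contains TLB_WB, so
 *
 *	if (tlb_flag(TLB_WB))
 *		dsb();
 *
 * reduces to an unconditional dsb(), while a test on a flag outside
 * possible_tlb_flags (e.g. TLB_V3_FULL) folds to zero and the branch
 * is eliminated entirely, without __cpu_tlb_flags ever being read.
 */
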
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V7_UIS_FULL))
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}
	put_cpu();

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
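	/*
	 * ARM_ERRATA_720789 workaround (Cortex-A9): avoid the
	 * by-ASID inner-shareable invalidate and invalidate the
	 * entire unified TLB (TLBIALLIS) instead.
	 */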
	if (tlb_flag(TLB_V7_UIS_ASID))
#ifdef CONFIG_ARM_ERRATA_720789
		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
#else
		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
#endif

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
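	/*
	 * ARM_ERRATA_720789 workaround (Cortex-A9): invalidate by MVA
	 * across all ASIDs (TLBIMVAAIS) rather than by MVA and ASID.
	 */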
	if (tlb_flag(TLB_V7_UIS_PAGE))
#ifdef CONFIG_ARM_ERRATA_720789
		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc");
#else
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
#endif

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V7_UIS_PAGE))
		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
	if (tlb_flag(TLB_V7_IS_BTB)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}

/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1	@ L2 flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1	@ L2 flush_pmd"
			: : "r" (pmd) : "cc");
}
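
/*
 * Illustrative sketch (pmdp, phys and prot are hypothetical caller
 * names, not defined here): page table setup code writes the entry
 * first, then pushes it out so the hardware table walker sees it:
 *
 *	*pmdp = __pmd(phys | prot);
 *	flush_pmd_entry(pmdp);
 *
 * clean_pmd_entry() is the counterpart for the removal path, where
 * draining the write buffer is not required.
 */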

#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that
 * any cache entries for the kernel's virtual memory range are written
 * back to the page.  On ARMv6 and later, the cache coherency is
 * handled via the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
}
#endif

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_TLBFLUSH_H */