/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef __ASSEMBLY__
# include <linux/mm_types.h>
#endif

#ifdef CONFIG_MMU

#include <asm/glue.h>

#define TLB_V4_U_PAGE   (1 << 1)
#define TLB_V4_D_PAGE   (1 << 2)
#define TLB_V4_I_PAGE   (1 << 3)
#define TLB_V6_U_PAGE   (1 << 4)
#define TLB_V6_D_PAGE   (1 << 5)
#define TLB_V6_I_PAGE   (1 << 6)

#define TLB_V4_U_FULL   (1 << 9)
#define TLB_V4_D_FULL   (1 << 10)
#define TLB_V4_I_FULL   (1 << 11)
#define TLB_V6_U_FULL   (1 << 12)
#define TLB_V6_D_FULL   (1 << 13)
#define TLB_V6_I_FULL   (1 << 14)

#define TLB_V6_U_ASID   (1 << 16)
#define TLB_V6_D_ASID   (1 << 17)
#define TLB_V6_I_ASID   (1 << 18)

#define TLB_V6_BP       (1 << 19)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE (1 << 20)
#define TLB_V7_UIS_FULL (1 << 21)
#define TLB_V7_UIS_ASID (1 << 22)
#define TLB_V7_UIS_BP   (1 << 23)

#define TLB_BARRIER     (1 << 28)
#define TLB_L2CLEAN_FR  (1 << 29)               /* Feroceon */
#define TLB_DCLEAN      (1 << 30)
#define TLB_WB          (1 << 31)

/*
 *      MMU TLB Model
 *      =============
 *
 *      We have the following to choose from:
 *        v4    - ARMv4 without write buffer
 *        v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *        v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *        fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *        fa    - Faraday (v4 with write buffer with UTLB)
 *        v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *        v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif
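
/*
 * Illustrative example (not part of the original header): if only
 * CONFIG_CPU_TLB_V7 is selected, the model blocks below resolve to
 *
 *      #define _TLB v7wbi
 *
 * and the glue macros bind the flush calls directly to the v7wbi_*
 * functions.  If a second model (say CONFIG_CPU_TLB_V6) were also
 * selected, its "#ifdef _TLB" test would fire and define MULTI_TLB,
 * routing all calls through the cpu_tlb function pointer table
 * instead.
 */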

#define v4_tlb_flags    (TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags      v4_tlb_flags
# define v4_always_flags        v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags      0
# define v4_always_flags        (-1UL)
#endif

#define fa_tlb_flags    (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags      fa_tlb_flags
# define fa_always_flags        fa_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB fa
# endif
#else
# define fa_possible_flags      0
# define fa_always_flags        (-1UL)
#endif

#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags   v4wbi_tlb_flags
# define v4wbi_always_flags     v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags   0
# define v4wbi_always_flags     (-1UL)
#endif

#define fr_tlb_flags    (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags      fr_tlb_flags
# define fr_always_flags        fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags      0
# define fr_always_flags        (-1UL)
#endif

#define v4wb_tlb_flags  (TLB_WB | TLB_DCLEAN | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags    v4wb_tlb_flags
# define v4wb_always_flags      v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags    0
# define v4wb_always_flags      (-1UL)
#endif

#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V6_I_FULL | TLB_V6_D_FULL | \
                         TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
                         TLB_V6_I_ASID | TLB_V6_D_ASID | \
                         TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags   v6wbi_tlb_flags
# define v6wbi_always_flags     v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags   0
# define v6wbi_always_flags     (-1UL)
#endif

#define v7wbi_tlb_flags_smp     (TLB_WB | TLB_BARRIER | \
                                 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
                                 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
#define v7wbi_tlb_flags_up      (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                                 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
                                 TLB_V6_U_ASID | TLB_V6_BP)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
#  define v7wbi_possible_flags  (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
#  define v7wbi_always_flags    (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
#  define v7wbi_possible_flags  v7wbi_tlb_flags_smp
#  define v7wbi_always_flags    v7wbi_tlb_flags_smp
# else
#  define v7wbi_possible_flags  v7wbi_tlb_flags_up
#  define v7wbi_always_flags    v7wbi_tlb_flags_up
# endif
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags   0
# define v7wbi_always_flags     (-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
        void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
        void (*flush_kern_range)(unsigned long, unsigned long);
        unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range      cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range      cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range      __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range      __glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags                 cpu_tlb.tlb_flags
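
/*
 * Illustrative sketch (an assumption about the callers, not from the
 * original source): on a MULTI_TLB build,
 *
 *      local_flush_tlb_kernel_range(start, end);
 *
 * indirects through cpu_tlb.flush_kern_range, which the CPU setup
 * code points at the implementation matching the running core; on a
 * single-model build the same call glues statically to, e.g.,
 * v7wbi_flush_kern_tlb_range(start, end) with no indirection.
 */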

/*
 *      TLB Management
 *      ==============
 *
 *      The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *      The TLB specific code is expected to perform whatever tests it
 *      needs to determine if it should invalidate the TLB for each
 *      call.  Start addresses are inclusive and end addresses are
 *      exclusive; it is safe to round these addresses down.
 *
 *      flush_tlb_all()
 *
 *              Invalidate the entire TLB.
 *
 *      flush_tlb_mm(mm)
 *
 *              Invalidate all TLB entries in a particular address
 *              space.
 *              - mm    - mm_struct describing address space
 *
 *      flush_tlb_range(vma,start,end)
 *
 *              Invalidate a range of TLB entries in the specified
 *              address space.
 *              - vma   - vm_area_struct describing address range
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 *
 *      flush_tlb_page(vma,uaddr)
 *
 *              Invalidate the specified page in the specified address range.
 *              - vma   - vm_area_struct describing address range
 *              - uaddr - virtual address (may not be aligned)
 *
 *      flush_tlb_kernel_page(kaddr)
 *
 *              Invalidate the TLB entry for the specified page.  The address
 *              will be in the kernel's virtual memory space.  Current uses
 *              only require the D-TLB to be invalidated.
 *              - kaddr - Kernel virtual memory address
 */
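
/*
 * Example usage (an illustrative sketch, not part of the original
 * header): code that rewrites a live kernel mapping is expected to
 * invalidate the stale TLB entry afterwards, e.g.
 *
 *      set_pte_ext(ptep, pfn_pte(pfn, PAGE_KERNEL), 0);
 *      flush_tlb_kernel_page(kaddr);
 *
 * where ptep, pfn and kaddr are hypothetical locals for the mapping
 * being changed.  For a user mapping, flush_tlb_page(vma, uaddr)
 * performs the equivalent per-page invalidation, tagged with the
 * mm's ASID on TLB models that support ASIDs.
 */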

/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * because too many people got it wrong.
 */
#define possible_tlb_flags      (v4_possible_flags | \
                                 v4wbi_possible_flags | \
                                 fr_possible_flags | \
                                 v4wb_possible_flags | \
                                 fa_possible_flags | \
                                 v6wbi_possible_flags | \
                                 v7wbi_possible_flags)

#define always_tlb_flags        (v4_always_flags & \
                                 v4wbi_always_flags & \
                                 fr_always_flags & \
                                 v4wb_always_flags & \
                                 fa_always_flags & \
                                 v6wbi_always_flags & \
                                 v7wbi_always_flags)

#define tlb_flag(f)     ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

#define __tlb_op(f, insnarg, arg)                                       \
        do {                                                            \
                if (always_tlb_flags & (f))                             \
                        asm("mcr " insnarg                              \
                            : : "r" (arg) : "cc");                      \
                else if (possible_tlb_flags & (f))                      \
                        asm("tst %1, %2\n\t"                            \
                            "mcrne " insnarg                            \
                            : : "r" (arg), "r" (__tlb_flag), "Ir" (f)   \
                            : "cc");                                    \
        } while (0)

#define tlb_op(f, regs, arg)    __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
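
/*
 * Illustrative expansion (not in the original source): when
 * TLB_DCLEAN is in always_tlb_flags,
 *
 *      tlb_op(TLB_DCLEAN, "c7, c10, 1", pmd);
 *
 * emits a single unconditional
 *
 *      mcr p15, 0, %0, c7, c10, 1
 *
 * whereas when the flag is merely possible, the flags word is tested
 * at run time and the MCR is predicated:
 *
 *      tst %1, %2
 *      mcrne p15, 0, %0, c7, c10, 1
 */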

static inline void __local_flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
}

static inline void local_flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_all();
        tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(nsh);
                isb();
        }
}

static inline void __flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_all();
        tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(ish);
                isb();
        }
}

static inline void __local_flush_tlb_mm(struct mm_struct *mm)
{
        const int zero = 0;
        const int asid = ASID(mm);
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
                }
        }

        tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
        tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
        tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        const int asid = ASID(mm);
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_mm(mm);
        tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);

        if (tlb_flag(TLB_BARRIER))
                dsb(nsh);
}

static inline void __flush_tlb_mm(struct mm_struct *mm)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_mm(mm);
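        /*
         * With CONFIG_ARM_ERRATA_720789 (broadcast by-ASID/by-MVA TLB
         * invalidation is unreliable on the affected Cortex-A9
         * revisions), the whole inner-shareable TLB is invalidated
         * rather than just this mm's ASID.
         */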
#ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
#else
        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
#endif

        if (tlb_flag(TLB_BARRIER))
                dsb(ish);
}

static inline void
__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
                tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
                tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
                if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
                        asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
        }

        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_page(vma, uaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);

        if (tlb_flag(TLB_BARRIER))
                dsb(nsh);
}

static inline void
__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif

        if (tlb_flag(TLB_BARRIER))
                dsb(ish);
}

static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
        if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
                asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        kaddr &= PAGE_MASK;

        if (tlb_flag(TLB_WB))
                dsb(nshst);

        __local_flush_tlb_kernel_page(kaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(nsh);
                isb();
        }
}

static inline void __flush_tlb_kernel_page(unsigned long kaddr)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        kaddr &= PAGE_MASK;

        if (tlb_flag(TLB_WB))
                dsb(ishst);

        __local_flush_tlb_kernel_page(kaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);

        if (tlb_flag(TLB_BARRIER)) {
                dsb(ish);
                isb();
        }
}

/*
 * Branch predictor maintenance is paired with full TLB invalidation, so
 * there is no need for any barriers here.
 */
static inline void __local_flush_bp_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_V6_BP))
                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}

static inline void local_flush_bp_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        __local_flush_bp_all();
        if (tlb_flag(TLB_V7_UIS_BP))
                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
}

static inline void __flush_bp_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        __local_flush_bp_all();
        if (tlb_flag(TLB_V7_UIS_BP))
                asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}

/*
 *      flush_pmd_entry
 *
 *      Flush a PMD entry (word aligned, or double-word aligned) to
 *      RAM if the TLB for the CPU we are running on requires this.
 *      This is typically used when we are creating PMD entries.
 *
 *      clean_pmd_entry
 *
 *      Clean (but don't drain the write buffer) if the CPU requires
 *      these operations.  This is typically used when we are removing
 *      PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_DCLEAN, "c7, c10, 1  @ flush_pmd", pmd);
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);

        if (tlb_flag(TLB_WB))
                dsb(ishst);
}

static inline void clean_pmd_entry(void *pmd)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_DCLEAN, "c7, c10, 1  @ clean_pmd", pmd);
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 clean_pmd", pmd);
}
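
/*
 * Illustrative sketch (not from the original source): a page table
 * setter pairs the PMD write with flush_pmd_entry() so that a
 * hardware table walk sees the update, e.g.
 *
 *      *pmdp = __pmd(phys | PMD_TYPE_SECT);
 *      flush_pmd_entry(pmdp);
 *
 * where pmdp and phys are hypothetical locals.
 */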

#undef tlb_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)    __cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)       __cpu_flush_kern_tlb_range(s,e)
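
/*
 * For example (illustrative only): local_flush_tlb_range(vma, start,
 * end) becomes __cpu_flush_user_tlb_range(start, end, vma), matching
 * the argument order that the tlb-*.S entry points expect.
 */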

#ifndef CONFIG_SMP
#define flush_tlb_all           local_flush_tlb_all
#define flush_tlb_mm            local_flush_tlb_mm
#define flush_tlb_page          local_flush_tlb_page
#define flush_tlb_kernel_page   local_flush_tlb_kernel_page
#define flush_tlb_range         local_flush_tlb_range
#define flush_tlb_kernel_range  local_flush_tlb_kernel_range
#define flush_bp_all            local_flush_bp_all
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page. On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
{
}
#endif

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#endif

#elif defined(CONFIG_SMP)       /* !CONFIG_MMU */

#ifndef __ASSEMBLY__
static inline void local_flush_tlb_all(void)                                                                    { }
static inline void local_flush_tlb_mm(struct mm_struct *mm)                                                     { }
static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)                        { }
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)                                             { }
static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)    { }
static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)                         { }
static inline void local_flush_bp_all(void)                                                                     { }

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_bp_all(void);
#endif  /* __ASSEMBLY__ */

#endif

#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM_ERRATA_798181
extern void erratum_a15_798181_init(void);
#else
static inline void erratum_a15_798181_init(void) {}
#endif
extern bool (*erratum_a15_798181_handler)(void);

static inline bool erratum_a15_798181(void)
{
        if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
                erratum_a15_798181_handler))
                return erratum_a15_798181_handler();
        return false;
}
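
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 *
 *      if (erratum_a15_798181())
 *              broadcast_workaround_flush();   (hypothetical helper)
 *
 * erratum_a15_798181_handler is installed by
 * erratum_a15_798181_init() during boot, and IS_ENABLED() lets the
 * compiler discard the test entirely when the workaround is not
 * configured.
 */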
#endif

#endif