linux/arch/arm/include/asm/tlbflush.h
<<
>>
Prefs
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 *  arch/arm/include/asm/tlbflush.h
   4 *
   5 *  Copyright (C) 1999-2003 Russell King
   6 */
   7#ifndef _ASMARM_TLBFLUSH_H
   8#define _ASMARM_TLBFLUSH_H
   9
  10#ifndef __ASSEMBLY__
  11# include <linux/mm_types.h>
  12#endif
  13
  14#ifdef CONFIG_MMU
  15
  16#include <asm/glue.h>
  17
  18#define TLB_V4_U_PAGE   (1 << 1)
  19#define TLB_V4_D_PAGE   (1 << 2)
  20#define TLB_V4_I_PAGE   (1 << 3)
  21#define TLB_V6_U_PAGE   (1 << 4)
  22#define TLB_V6_D_PAGE   (1 << 5)
  23#define TLB_V6_I_PAGE   (1 << 6)
  24
  25#define TLB_V4_U_FULL   (1 << 9)
  26#define TLB_V4_D_FULL   (1 << 10)
  27#define TLB_V4_I_FULL   (1 << 11)
  28#define TLB_V6_U_FULL   (1 << 12)
  29#define TLB_V6_D_FULL   (1 << 13)
  30#define TLB_V6_I_FULL   (1 << 14)
  31
  32#define TLB_V6_U_ASID   (1 << 16)
  33#define TLB_V6_D_ASID   (1 << 17)
  34#define TLB_V6_I_ASID   (1 << 18)
  35
  36#define TLB_V6_BP       (1 << 19)
  37
  38/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
  39#define TLB_V7_UIS_PAGE (1 << 20)
  40#define TLB_V7_UIS_FULL (1 << 21)
  41#define TLB_V7_UIS_ASID (1 << 22)
  42#define TLB_V7_UIS_BP   (1 << 23)
  43
  44#define TLB_BARRIER     (1 << 28)
  45#define TLB_L2CLEAN_FR  (1 << 29)               /* Feroceon */
  46#define TLB_DCLEAN      (1 << 30)
  47#define TLB_WB          (1 << 31)
  48
  49/*
  50 *      MMU TLB Model
  51 *      =============
  52 *
  53 *      We have the following to choose from:
  54 *        v4    - ARMv4 without write buffer
  55 *        v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  56 *        v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
  57 *        fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
  58 *        fa    - Faraday (v4 with write buffer with UTLB)
  59 *        v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
  60 *        v7wbi - identical to v6wbi
  61 */
  62#undef _TLB
  63#undef MULTI_TLB
  64
  65#ifdef CONFIG_SMP_ON_UP
  66#define MULTI_TLB 1
  67#endif
  68
  69#define v4_tlb_flags    (TLB_V4_U_FULL | TLB_V4_U_PAGE)
  70
  71#ifdef CONFIG_CPU_TLB_V4WT
  72# define v4_possible_flags      v4_tlb_flags
  73# define v4_always_flags        v4_tlb_flags
  74# ifdef _TLB
  75#  define MULTI_TLB 1
  76# else
  77#  define _TLB v4
  78# endif
  79#else
  80# define v4_possible_flags      0
  81# define v4_always_flags        (-1UL)
  82#endif
  83
  84#define fa_tlb_flags    (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
  85                         TLB_V4_U_FULL | TLB_V4_U_PAGE)
  86
  87#ifdef CONFIG_CPU_TLB_FA
  88# define fa_possible_flags      fa_tlb_flags
  89# define fa_always_flags        fa_tlb_flags
  90# ifdef _TLB
  91#  define MULTI_TLB 1
  92# else
  93#  define _TLB fa
  94# endif
  95#else
  96# define fa_possible_flags      0
  97# define fa_always_flags        (-1UL)
  98#endif
  99
 100#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
 101                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
 102                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)
 103
 104#ifdef CONFIG_CPU_TLB_V4WBI
 105# define v4wbi_possible_flags   v4wbi_tlb_flags
 106# define v4wbi_always_flags     v4wbi_tlb_flags
 107# ifdef _TLB
 108#  define MULTI_TLB 1
 109# else
 110#  define _TLB v4wbi
 111# endif
 112#else
 113# define v4wbi_possible_flags   0
 114# define v4wbi_always_flags     (-1UL)
 115#endif
 116
 117#define fr_tlb_flags    (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
 118                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
 119                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)
 120
 121#ifdef CONFIG_CPU_TLB_FEROCEON
 122# define fr_possible_flags      fr_tlb_flags
 123# define fr_always_flags        fr_tlb_flags
 124# ifdef _TLB
 125#  define MULTI_TLB 1
 126# else
 127#  define _TLB v4wbi
 128# endif
 129#else
 130# define fr_possible_flags      0
 131# define fr_always_flags        (-1UL)
 132#endif
 133
 134#define v4wb_tlb_flags  (TLB_WB | TLB_DCLEAN | \
 135                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
 136                         TLB_V4_D_PAGE)
 137
 138#ifdef CONFIG_CPU_TLB_V4WB
 139# define v4wb_possible_flags    v4wb_tlb_flags
 140# define v4wb_always_flags      v4wb_tlb_flags
 141# ifdef _TLB
 142#  define MULTI_TLB 1
 143# else
 144#  define _TLB v4wb
 145# endif
 146#else
 147# define v4wb_possible_flags    0
 148# define v4wb_always_flags      (-1UL)
 149#endif
 150
 151#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 152                         TLB_V6_I_FULL | TLB_V6_D_FULL | \
 153                         TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
 154                         TLB_V6_I_ASID | TLB_V6_D_ASID | \
 155                         TLB_V6_BP)
 156
 157#ifdef CONFIG_CPU_TLB_V6
 158# define v6wbi_possible_flags   v6wbi_tlb_flags
 159# define v6wbi_always_flags     v6wbi_tlb_flags
 160# ifdef _TLB
 161#  define MULTI_TLB 1
 162# else
 163#  define _TLB v6wbi
 164# endif
 165#else
 166# define v6wbi_possible_flags   0
 167# define v6wbi_always_flags     (-1UL)
 168#endif
 169
 170#define v7wbi_tlb_flags_smp     (TLB_WB | TLB_BARRIER | \
 171                                 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
 172                                 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 173#define v7wbi_tlb_flags_up      (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 174                                 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
 175                                 TLB_V6_U_ASID | TLB_V6_BP)
 176
 177#ifdef CONFIG_CPU_TLB_V7
 178
 179# ifdef CONFIG_SMP_ON_UP
 180#  define v7wbi_possible_flags  (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
 181#  define v7wbi_always_flags    (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
 182# elif defined(CONFIG_SMP)
 183#  define v7wbi_possible_flags  v7wbi_tlb_flags_smp
 184#  define v7wbi_always_flags    v7wbi_tlb_flags_smp
 185# else
 186#  define v7wbi_possible_flags  v7wbi_tlb_flags_up
 187#  define v7wbi_always_flags    v7wbi_tlb_flags_up
 188# endif
 189# ifdef _TLB
 190#  define MULTI_TLB 1
 191# else
 192#  define _TLB v7wbi
 193# endif
 194#else
 195# define v7wbi_possible_flags   0
 196# define v7wbi_always_flags     (-1UL)
 197#endif
 198
 199#ifndef _TLB
 200#error Unknown TLB model
 201#endif
 202
 203#ifndef __ASSEMBLY__
 204
 205#include <linux/sched.h>
 206
 207struct cpu_tlb_fns {
 208        void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
 209        void (*flush_kern_range)(unsigned long, unsigned long);
 210        unsigned long tlb_flags;
 211};
 212
 213/*
 214 * Select the calling method
 215 */
 216#ifdef MULTI_TLB
 217
 218#define __cpu_flush_user_tlb_range      cpu_tlb.flush_user_range
 219#define __cpu_flush_kern_tlb_range      cpu_tlb.flush_kern_range
 220
 221#else
 222
 223#define __cpu_flush_user_tlb_range      __glue(_TLB,_flush_user_tlb_range)
 224#define __cpu_flush_kern_tlb_range      __glue(_TLB,_flush_kern_tlb_range)
 225
 226extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
 227extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
 228
 229#endif
 230
 231extern struct cpu_tlb_fns cpu_tlb;
 232
 233#define __cpu_tlb_flags                 cpu_tlb.tlb_flags
 234
 235/*
 236 *      TLB Management
 237 *      ==============
 238 *
 239 *      The arch/arm/mm/tlb-*.S files implement these methods.
 240 *
 241 *      The TLB specific code is expected to perform whatever tests it
 242 *      needs to determine if it should invalidate the TLB for each
 243 *      call.  Start addresses are inclusive and end addresses are
 244 *      exclusive; it is safe to round these addresses down.
 245 *
 246 *      flush_tlb_all()
 247 *
 248 *              Invalidate the entire TLB.
 249 *
 250 *      flush_tlb_mm(mm)
 251 *
 252 *              Invalidate all TLB entries in a particular address
 253 *              space.
 254 *              - mm    - mm_struct describing address space
 255 *
 256 *      flush_tlb_range(vma,start,end)
 257 *
 258 *              Invalidate a range of TLB entries in the specified
 259 *              address space.
 260 *              - mm    - mm_struct describing address space
 261 *              - start - start address (may not be aligned)
 262 *              - end   - end address (exclusive, may not be aligned)
 263 *
 264 *      flush_tlb_page(vma, uaddr)
 265 *
 266 *              Invalidate the specified page in the specified address range.
 267 *              - vma   - vm_area_struct describing address range
 268 *              - vaddr - virtual address (may not be aligned)
 269 */
 270
 271/*
 272 * We optimise the code below by:
 273 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 274 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 275 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 276 *
 277 * This allows us to build optimal assembly for the single-CPU type case,
 278 * and as close to optimal given the compiler constrants for multi-CPU
 279 * case.  We could do better for the multi-CPU case if the compiler
 280 * implemented the "%?" method, but this has been discontinued due to too
 281 * many people getting it wrong.
 282 */
 283#define possible_tlb_flags      (v4_possible_flags | \
 284                                 v4wbi_possible_flags | \
 285                                 fr_possible_flags | \
 286                                 v4wb_possible_flags | \
 287                                 fa_possible_flags | \
 288                                 v6wbi_possible_flags | \
 289                                 v7wbi_possible_flags)
 290
 291#define always_tlb_flags        (v4_always_flags & \
 292                                 v4wbi_always_flags & \
 293                                 fr_always_flags & \
 294                                 v4wb_always_flags & \
 295                                 fa_always_flags & \
 296                                 v6wbi_always_flags & \
 297                                 v7wbi_always_flags)
 298
 299#define tlb_flag(f)     ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
 300
 301#define __tlb_op(f, insnarg, arg)                                       \
 302        do {                                                            \
 303                if (always_tlb_flags & (f))                             \
 304                        asm("mcr " insnarg                              \
 305                            : : "r" (arg) : "cc");                      \
 306                else if (possible_tlb_flags & (f))                      \
 307                        asm("tst %1, %2\n\t"                            \
 308                            "mcrne " insnarg                            \
 309                            : : "r" (arg), "r" (__tlb_flag), "Ir" (f)   \
 310                            : "cc");                                    \
 311        } while (0)
 312
 313#define tlb_op(f, regs, arg)    __tlb_op(f, "p15, 0, %0, " regs, arg)
 314#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
 315
 316static inline void __local_flush_tlb_all(void)
 317{
 318        const int zero = 0;
 319        const unsigned int __tlb_flag = __cpu_tlb_flags;
 320
 321        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
 322        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
 323        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
 324}
 325
 326static inline void local_flush_tlb_all(void)
 327{
 328        const int zero = 0;
 329        const unsigned int __tlb_flag = __cpu_tlb_flags;
 330
 331        if (tlb_flag(TLB_WB))
 332                dsb(nshst);
 333
 334        __local_flush_tlb_all();
 335        tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
 336
 337        if (tlb_flag(TLB_BARRIER)) {
 338                dsb(nsh);
 339                isb();
 340        }
 341}
 342
 343static inline void __flush_tlb_all(void)
 344{
 345        const int zero = 0;
 346        const unsigned int __tlb_flag = __cpu_tlb_flags;
 347
 348        if (tlb_flag(TLB_WB))
 349                dsb(ishst);
 350
 351        __local_flush_tlb_all();
 352        tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
 353
 354        if (tlb_flag(TLB_BARRIER)) {
 355                dsb(ish);
 356                isb();
 357        }
 358}
 359
 360static inline void __local_flush_tlb_mm(struct mm_struct *mm)
 361{
 362        const int zero = 0;
 363        const int asid = ASID(mm);
 364        const unsigned int __tlb_flag = __cpu_tlb_flags;
 365
 366        if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
 367                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 368                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
 369                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
 370                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
 371                }
 372        }
 373
 374        tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
 375        tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
 376        tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
 377}
 378
 379static inline void local_flush_tlb_mm(struct mm_struct *mm)
 380{
 381        const int asid = ASID(mm);
 382        const unsigned int __tlb_flag = __cpu_tlb_flags;
 383
 384        if (tlb_flag(TLB_WB))
 385                dsb(nshst);
 386
 387        __local_flush_tlb_mm(mm);
 388        tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
 389
 390        if (tlb_flag(TLB_BARRIER))
 391                dsb(nsh);
 392}
 393
 394static inline void __flush_tlb_mm(struct mm_struct *mm)
 395{
 396        const unsigned int __tlb_flag = __cpu_tlb_flags;
 397
 398        if (tlb_flag(TLB_WB))
 399                dsb(ishst);
 400
 401        __local_flush_tlb_mm(mm);
 402#ifdef CONFIG_ARM_ERRATA_720789
 403        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
 404#else
 405        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
 406#endif
 407
 408        if (tlb_flag(TLB_BARRIER))
 409                dsb(ish);
 410}
 411
 412static inline void
 413__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 414{
 415        const int zero = 0;
 416        const unsigned int __tlb_flag = __cpu_tlb_flags;
 417
 418        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 419
 420        if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
 421            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 422                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
 423                tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
 424                tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
 425                if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
 426                        asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
 427        }
 428
 429        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
 430        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
 431        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
 432}
 433
 434static inline void
 435local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 436{
 437        const unsigned int __tlb_flag = __cpu_tlb_flags;
 438
 439        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 440
 441        if (tlb_flag(TLB_WB))
 442                dsb(nshst);
 443
 444        __local_flush_tlb_page(vma, uaddr);
 445        tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
 446
 447        if (tlb_flag(TLB_BARRIER))
 448                dsb(nsh);
 449}
 450
 451static inline void
 452__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 453{
 454        const unsigned int __tlb_flag = __cpu_tlb_flags;
 455
 456        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 457
 458        if (tlb_flag(TLB_WB))
 459                dsb(ishst);
 460
 461        __local_flush_tlb_page(vma, uaddr);
 462#ifdef CONFIG_ARM_ERRATA_720789
 463        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
 464#else
 465        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
 466#endif
 467
 468        if (tlb_flag(TLB_BARRIER))
 469                dsb(ish);
 470}
 471
 472static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
 473{
 474        const int zero = 0;
 475        const unsigned int __tlb_flag = __cpu_tlb_flags;
 476
 477        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
 478        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
 479        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
 480        if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
 481                asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
 482
 483        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
 484        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
 485        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
 486}
 487
 488static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 489{
 490        const unsigned int __tlb_flag = __cpu_tlb_flags;
 491
 492        kaddr &= PAGE_MASK;
 493
 494        if (tlb_flag(TLB_WB))
 495                dsb(nshst);
 496
 497        __local_flush_tlb_kernel_page(kaddr);
 498        tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
 499
 500        if (tlb_flag(TLB_BARRIER)) {
 501                dsb(nsh);
 502                isb();
 503        }
 504}
 505
 506static inline void __flush_tlb_kernel_page(unsigned long kaddr)
 507{
 508        const unsigned int __tlb_flag = __cpu_tlb_flags;
 509
 510        kaddr &= PAGE_MASK;
 511
 512        if (tlb_flag(TLB_WB))
 513                dsb(ishst);
 514
 515        __local_flush_tlb_kernel_page(kaddr);
 516        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
 517
 518        if (tlb_flag(TLB_BARRIER)) {
 519                dsb(ish);
 520                isb();
 521        }
 522}
 523
 524/*
 525 * Branch predictor maintenance is paired with full TLB invalidation, so
 526 * there is no need for any barriers here.
 527 */
 528static inline void __local_flush_bp_all(void)
 529{
 530        const int zero = 0;
 531        const unsigned int __tlb_flag = __cpu_tlb_flags;
 532
 533        if (tlb_flag(TLB_V6_BP))
 534                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
 535}
 536
 537static inline void local_flush_bp_all(void)
 538{
 539        const int zero = 0;
 540        const unsigned int __tlb_flag = __cpu_tlb_flags;
 541
 542        __local_flush_bp_all();
 543        if (tlb_flag(TLB_V7_UIS_BP))
 544                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
 545}
 546
 547static inline void __flush_bp_all(void)
 548{
 549        const int zero = 0;
 550        const unsigned int __tlb_flag = __cpu_tlb_flags;
 551
 552        __local_flush_bp_all();
 553        if (tlb_flag(TLB_V7_UIS_BP))
 554                asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 555}
 556
 557/*
 558 *      flush_pmd_entry
 559 *
 560 *      Flush a PMD entry (word aligned, or double-word aligned) to
 561 *      RAM if the TLB for the CPU we are running on requires this.
 562 *      This is typically used when we are creating PMD entries.
 563 *
 564 *      clean_pmd_entry
 565 *
 566 *      Clean (but don't drain the write buffer) if the CPU requires
 567 *      these operations.  This is typically used when we are removing
 568 *      PMD entries.
 569 */
 570static inline void flush_pmd_entry(void *pmd)
 571{
 572        const unsigned int __tlb_flag = __cpu_tlb_flags;
 573
 574        tlb_op(TLB_DCLEAN, "c7, c10, 1  @ flush_pmd", pmd);
 575        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
 576
 577        if (tlb_flag(TLB_WB))
 578                dsb(ishst);
 579}
 580
 581static inline void clean_pmd_entry(void *pmd)
 582{
 583        const unsigned int __tlb_flag = __cpu_tlb_flags;
 584
 585        tlb_op(TLB_DCLEAN, "c7, c10, 1  @ flush_pmd", pmd);
 586        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
 587}
 588
 589#undef tlb_op
 590#undef tlb_flag
 591#undef always_tlb_flags
 592#undef possible_tlb_flags
 593
 594/*
 595 * Convert calls to our calling convention.
 596 */
 597#define local_flush_tlb_range(vma,start,end)    __cpu_flush_user_tlb_range(start,end,vma)
 598#define local_flush_tlb_kernel_range(s,e)       __cpu_flush_kern_tlb_range(s,e)
 599
 600#ifndef CONFIG_SMP
 601#define flush_tlb_all           local_flush_tlb_all
 602#define flush_tlb_mm            local_flush_tlb_mm
 603#define flush_tlb_page          local_flush_tlb_page
 604#define flush_tlb_kernel_page   local_flush_tlb_kernel_page
 605#define flush_tlb_range         local_flush_tlb_range
 606#define flush_tlb_kernel_range  local_flush_tlb_kernel_range
 607#define flush_bp_all            local_flush_bp_all
 608#else
 609extern void flush_tlb_all(void);
 610extern void flush_tlb_mm(struct mm_struct *mm);
 611extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 612extern void flush_tlb_kernel_page(unsigned long kaddr);
 613extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 614extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 615extern void flush_bp_all(void);
 616#endif
 617
 618/*
 619 * If PG_dcache_clean is not set for the page, we need to ensure that any
 620 * cache entries for the kernels virtual memory range are written
 621 * back to the page. On ARMv6 and later, the cache coherency is handled via
 622 * the set_pte_at() function.
 623 */
 624#if __LINUX_ARM_ARCH__ < 6
 625extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 626        pte_t *ptep);
 627#else
 628static inline void update_mmu_cache(struct vm_area_struct *vma,
 629                                    unsigned long addr, pte_t *ptep)
 630{
 631}
 632#endif
 633
 634#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 635
 636#endif
 637
 638#elif defined(CONFIG_SMP)       /* !CONFIG_MMU */
 639
 640#ifndef __ASSEMBLY__
 641static inline void local_flush_tlb_all(void)                                                                    { }
 642static inline void local_flush_tlb_mm(struct mm_struct *mm)                                                     { }
 643static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)                        { }
 644static inline void local_flush_tlb_kernel_page(unsigned long kaddr)                                             { }
 645static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)    { }
 646static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)                         { }
 647static inline void local_flush_bp_all(void)                                                                     { }
 648
 649extern void flush_tlb_all(void);
 650extern void flush_tlb_mm(struct mm_struct *mm);
 651extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 652extern void flush_tlb_kernel_page(unsigned long kaddr);
 653extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 654extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 655extern void flush_bp_all(void);
 656#endif  /* __ASSEMBLY__ */
 657
 658#endif
 659
 660#ifndef __ASSEMBLY__
 661#ifdef CONFIG_ARM_ERRATA_798181
 662extern void erratum_a15_798181_init(void);
 663#else
 664static inline void erratum_a15_798181_init(void) {}
 665#endif
 666extern bool (*erratum_a15_798181_handler)(void);
 667
 668static inline bool erratum_a15_798181(void)
 669{
 670        if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
 671                erratum_a15_798181_handler))
 672                return erratum_a15_798181_handler();
 673        return false;
 674}
 675#endif
 676
 677#endif
 678