linux/arch/s390/include/asm/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
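	/* The .insn rrf,0xb98e0000 below is IDTE (INVALIDATE DAT TABLE ENTRY) */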
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (opt), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

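	/* COMPARE AND SWAP AND PURGE broadcasts a TLB purge to all CPUs */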
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (when a gmap is in use
 * this involves multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE, prefer a per-mm flush on all CPUs over
	 * a local flush, even if the mm only ever ran on the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
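	/* gmap_asce == -1UL requests a full flush instead of per-ASCE IDTE */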
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()

/*
 * Flush TLB entries for a specific mm on the local CPU (non-SMP build).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	__tlb_flush_local();
}
#endif

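/*
 * Lazy flush: only flush the mm if a previous page table update deferred
 * its TLB flush by setting mm->context.flush_mm (see the comment on
 * flush_tlb_mm and flush_tlb_range below).
 */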
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}
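
/*
 * Illustrative sketch of the deferred-flush pattern described above; it is
 * not part of this header, and lookup_pte_hypothetical() is a made-up
 * placeholder for however the caller obtains the pte pointer:
 *
 *	static void wrprotect_range(struct vm_area_struct *vma,
 *				    unsigned long start, unsigned long end)
 *	{
 *		unsigned long addr;
 *		pte_t *ptep;
 *
 *		for (addr = start; addr < end; addr += PAGE_SIZE) {
 *			ptep = lookup_pte_hypothetical(vma->vm_mm, addr);
 *			// may defer the TLB flush (single-user mm)
 *			ptep_set_wrprotect(vma->vm_mm, addr, ptep);
 *		}
 *		// performs the deferred flush via __tlb_flush_mm_lazy()
 *		flush_tlb_range(vma, start, end);
 *	}
 */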

/*
 * Flush the kernel ASCE; callers pass a range, but the flush is always
 * done for the whole kernel address space.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */