/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */
extern void setup_ptcg_sem(int max_purges, int from_palo);

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
  extern void smp_flush_tlb_cpumask (cpumask_t xcpumask);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
# define smp_flush_tlb_cpumask(m) local_flush_tlb_all()
#endif

  39static inline void
  40local_finish_flush_tlb_mm (struct mm_struct *mm)
  41{
  42        if (mm == current->active_mm)
  43                activate_context(mm);
  44}
  46/*
  47 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
  48 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
  49 * the PTEs of the parent task.
  50 */
  51static inline void
  52flush_tlb_mm (struct mm_struct *mm)
  53{
  54        if (!mm)
  55                return;
  56
  57        set_bit(mm->context, ia64_ctx.flushmap);
  58        mm->context = 0;
  59
  60        if (atomic_read(&mm->mm_users) == 0)
  61                return;         /* happens as a result of exit_mmap() */
  62
  63#ifdef CONFIG_SMP
  64        smp_flush_tlb_mm(mm);
  65#else
  66        local_finish_flush_tlb_mm(mm);
  67#endif
  68}

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

  72/*
  73 * Page-granular tlb flush.
  74 */
  75static inline void
  76flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
  77{
  78#ifdef CONFIG_SMP
  79        flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
  80#else
  81        if (vma->vm_mm == current->active_mm)
  82                ia64_ptcl(addr, (PAGE_SHIFT << 2));
  83        else
  84                vma->vm_mm->context = 0;
  85#endif
  86}

/*
 * Flush the local TLB. Invoked from another cpu using an IPI.
 */
#ifdef CONFIG_SMP
void smp_local_flush_tlb(void);
#else
#define smp_local_flush_tlb()
#endif

  97static inline void flush_tlb_kernel_range(unsigned long start,
  98                                          unsigned long end)
  99{
 100        flush_tlb_all();        /* XXX fix me */
 101}

#endif /* _ASM_IA64_TLBFLUSH_H */