linux/arch/powerpc/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *      TLB shootdown specifics for powerpc
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 */
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
#ifdef __KERNEL__

#ifndef __powerpc64__
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#endif

#include <linux/pagemap.h>

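/*
 * The per-VMA hooks below are intentionally empty: powerpc does not rely
 * on the generic per-VMA range flush and instead performs its
 * invalidations from tlb_flush() (and the platform flush code) when the
 * gather is torn down.  Defining __tlb_remove_tlb_entry to itself tells
 * <asm-generic/tlb.h> to use the inline version further down instead of
 * its empty default.
 */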
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma)   do { } while (0)
#define __tlb_remove_tlb_entry  __tlb_remove_tlb_entry

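/*
 * tlb_flush() is this architecture's hook for flushing the TLB once the
 * generic mmu_gather code has finished collecting pages; defining the
 * macro here stops <asm-generic/tlb.h> from installing its generic
 * fallback (built on flush_tlb_mm()/flush_tlb_range()).
 */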
#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
/*
 * book3s:
 * Hash does not use the Linux page tables, so we can avoid the TLB
 * invalidate for page-table freeing; Radix, on the other hand, does use
 * the page tables and needs the TLBI.
 *
 * nohash:
 * We still do a TLB invalidate in the __pte_free_tlb() routine before we
 * add the page-table pages to the mmu_gather table batch.
 */
#define tlb_needs_table_invalidate()    radix_enabled()

/* Get the generic bits... */
#include <asm-generic/tlb.h>

extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);

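/*
 * Called via tlb_remove_tlb_entry() for each PTE being torn down.  On
 * 32-bit Book3S the hash MMU may hold a hash-table entry for the PTE
 * (_PAGE_HASHPTE), and that entry has to be flushed here; on every other
 * platform there is nothing to do per PTE.
 */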
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
                                          unsigned long address)
{
#ifdef CONFIG_PPC_BOOK3S_32
        if (pte_val(*ptep) & _PAGE_HASHPTE)
                flush_hash_entry(tlb->mm, ptep, address);
#endif
}

#ifdef CONFIG_SMP
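/*
 * Return true when every CPU in the mm's cpumask is an SMT sibling of
 * the current CPU, i.e. the mm is only in use on this core; the flush
 * code can then restrict invalidations to the local core.
 */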
static inline int mm_is_core_local(struct mm_struct *mm)
{
        return cpumask_subset(mm_cpumask(mm),
                              topology_sibling_cpumask(smp_processor_id()));
}

#ifdef CONFIG_PPC_BOOK3S_64
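/*
 * Book3S-64 keeps a count of CPUs actively using the context in
 * mm->context.active_cpus: if more than one CPU is active the mm cannot
 * be thread-local, otherwise it is local exactly when the current CPU
 * sits in the mm's cpumask.
 */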
static inline int mm_is_thread_local(struct mm_struct *mm)
{
        if (atomic_read(&mm->context.active_cpus) > 1)
                return false;
        return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
#else /* CONFIG_PPC_BOOK3S_64 */
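/*
 * Without an active_cpus counter, fall back to requiring that the mm's
 * cpumask contains nothing but the current CPU.
 */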
static inline int mm_is_thread_local(struct mm_struct *mm)
{
        return cpumask_equal(mm_cpumask(mm),
                              cpumask_of(smp_processor_id()));
}
#endif /* !CONFIG_PPC_BOOK3S_64 */

#else /* CONFIG_SMP */
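/* On UP there is only one CPU, so every mm is trivially core and thread local. */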
static inline int mm_is_core_local(struct mm_struct *mm)
{
        return 1;
}

static inline int mm_is_thread_local(struct mm_struct *mm)
{
        return 1;
}
#endif

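/*
 * Usage sketch (illustrative only; the real policy lives in the
 * platform flush code, not in this header):
 *
 *      if (mm_is_thread_local(mm))
 *              ; // local invalidation is enough (e.g. tlbiel)
 *      else if (mm_is_core_local(mm))
 *              ; // core-scoped invalidation
 *      else
 *              ; // global flush (tlbie or IPI-based)
 */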
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLB_H */