linux/arch/mips/mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

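/*
 * Cached pte for the virtual address of FIX_KMAP_BEGIN; the per-CPU
 * atomic kmap slots below are reached as kmap_pte - idx, since higher
 * fixmap indices map to lower virtual addresses.
 */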
static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

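/*
 * Flush a single kmap virtual address from the TLB.  The generic
 * kmap() path calls this after installing a persistent highmem
 * mapping.
 */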
void kmap_flush_tlb(unsigned long addr)
{
	flush_tlb_one(addr);
}
EXPORT_SYMBOL(kmap_flush_tlb);

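/*
 * Map a highmem page into one of this CPU's atomic kmap fixmap slots
 * with the given protection.  The slot comes from
 * kmap_atomic_idx_push(), so atomic kmaps nest per CPU; preemption and
 * pagefaults have already been disabled by the generic
 * kmap_atomic_prot() wrapper.
 */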
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

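/*
 * Undo the most recent atomic kmap on this CPU.  In the normal
 * (non-debug) case the pte is left in place and simply overwritten by
 * the next kmap_atomic(); only the per-CPU slot index is popped.
 */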
void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START)
		return;

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
}
EXPORT_SYMBOL(kunmap_atomic_high);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
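/*
 * Illustrative use (not part of this file): a caller that only has a
 * physical frame number, e.g. when copying out of a device buffer,
 * could do something like
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	memcpy(dst, vaddr + offset, len);
 *	kunmap_atomic(vaddr);
 *
 * kmap_atomic_pfn() disables preemption and pagefaults itself, so the
 * mapping must be dropped with kunmap_atomic() before sleeping.
 */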
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}

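/*
 * Early-boot initialisation: cache the pte behind FIX_KMAP_BEGIN so
 * the mapping paths above can index the kmap ptes directly instead of
 * walking the page tables on every kmap_atomic().
 */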
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = virt_to_kpte(kmap_vstart);
}