linux/arch/mips/mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

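/*
 * Illustrative sketch, not part of the upstream file: a typical kmap()/
 * kunmap() pairing from sleepable (process) context.  The helper name and
 * the memcpy() payload are hypothetical; kmap()/kunmap() are the functions
 * defined above.
 *
 *	static void copy_from_page(struct page *page, void *dst, size_t len)
 *	{
 *		void *src = kmap(page);		// may sleep; returns a kernel vaddr
 *
 *		memcpy(dst, src, len);
 *		kunmap(page);			// release the highmem mapping
 *	}
 */
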
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap: no
 * global lock is needed, and kmap must perform a global TLB invalidation
 * when its pool of mappings wraps around.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type __maybe_unused;

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                preempt_enable();
                return;
        }

        type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();

                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * Force other mappings to Oops if they try to access
                 * this pte without first remapping it.
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                local_flush_tlb_one(vaddr);
        }
#endif
        kmap_atomic_idx_pop();
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

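/*
 * Illustrative sketch, not part of the upstream file: the typical
 * kmap_atomic()/kunmap_atomic() pairing.  No sleeping is allowed between
 * the two calls; the helper name and the stored value are hypothetical.
 *
 *	static void zero_highpage_word(struct page *page, unsigned int offset)
 *	{
 *		u32 *addr = kmap_atomic(page);	// disables preemption and pagefaults
 *
 *		addr[offset] = 0;		// short, non-sleeping access only
 *		kunmap_atomic(addr);		// wrapper around __kunmap_atomic() above
 *	}
 */
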
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void*) vaddr;
}

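/*
 * Illustrative sketch, not part of the upstream file: kmap_atomic_pfn()
 * maps a bare page frame, e.g. memory that has no struct page.  The
 * physical address is hypothetical; the mapping is torn down with
 * kunmap_atomic() just like a regular atomic kmap.
 *
 *	unsigned long pfn = PHYS_PFN(phys_addr);	// phys_addr: hypothetical
 *	u32 *p = kmap_atomic_pfn(pfn);
 *
 *	*p = 0;						// brief, atomic-context access
 *	kunmap_atomic(p);
 */
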
void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}