linux/arch/mips/mm/highmem.c
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

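/* pte of the first kmap fixmap slot, cached at boot by kmap_init() below */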
static pte_t *kmap_pte;

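/* First and one-past-last pfn of high memory, set up during memory init */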
unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
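
/*
 * Illustrative usage sketch (not from the original file): kmap()/kunmap()
 * may sleep, so this pattern is only valid in process context.  "buf" and
 * "len" are placeholder names.
 *
 *        void *vaddr = kmap(page);
 *        memcpy(vaddr, buf, len);
 *        kunmap(page);
 */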

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
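
/*
 * Illustrative usage sketch (not from the original file): an atomic kmap
 * must not sleep between map and unmap; kunmap_atomic() is the generic
 * wrapper that ends up calling __kunmap_atomic() below.  "buf" and "len"
 * are placeholder names.
 *
 *        void *vaddr = kmap_atomic(page);
 *        memcpy(buf, vaddr, len);
 *        kunmap_atomic(vaddr);
 */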

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type __maybe_unused;

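        /* Lowmem addresses come from page_address(); no fixmap pte to clear. */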
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                preempt_enable();
                return;
        }

        type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();

                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * force other mappings to Oops if they try to access
                 * this pte without first remapping it
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                local_flush_tlb_one(vaddr);
        }
#endif
        kmap_atomic_idx_pop();
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void*) vaddr;
}
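
/*
 * Illustrative usage sketch (not from the original file): a mapping made
 * with kmap_atomic_pfn() is released with kunmap_atomic() on the returned
 * address, under the same no-sleep rules as kmap_atomic().
 *
 *        void *vaddr = kmap_atomic_pfn(pfn);
 *        ... access the page through vaddr ...
 *        kunmap_atomic(vaddr);
 */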

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}