/* linux/arch/metag/mm/highmem.c */
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
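
/*
 * Hypothetical usage sketch (not part of the original file): a caller in
 * sleepable context maps the page with kmap(), touches it through the
 * returned kernel virtual address, and drops the mapping with kunmap().
 * The function name below is illustrative only.
 */
static void __maybe_unused example_touch_page_sleepable(struct page *page)
{
	char *vaddr = kmap(page);	/* may sleep waiting for a free slot */

	vaddr[0] = 0;			/* mapping stays valid until kunmap() */
	kunmap(page);
}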

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because kmap must perform a global
 * TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
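
/*
 * Hypothetical usage sketch (not part of the original file): an atomic
 * kmap may be taken in contexts that cannot sleep, but nothing between
 * kmap_atomic() and kunmap_atomic() is allowed to sleep either.  The
 * function name and the byte-copy loop below are illustrative only;
 * kunmap_atomic() is the generic wrapper that ends up in __kunmap_atomic().
 */
static void __maybe_unused example_copy_from_highpage(struct page *page,
						       char *dst,
						       unsigned long len)
{
	unsigned long i;
	char *src = kmap_atomic(page);	/* disables pagefaults/preemption */

	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	for (i = 0; i < len; i++)	/* short, non-sleeping copy */
		dst[i] = src[i];
	kunmap_atomic(src);		/* re-enables pagefaults/preemption */
}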

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	enum fixed_addresses idx;
	unsigned long vaddr;
	int type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

	return (void *)vaddr;
}
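
/*
 * Hypothetical usage sketch (not part of the original file): with only a
 * physical frame number (e.g. a frame that has no struct page),
 * kmap_atomic_pfn() provides a temporary fixmap mapping that is released
 * with kunmap_atomic().  The function name below is illustrative only.
 */
static unsigned int __maybe_unused example_peek_pfn_word(unsigned long pfn)
{
	unsigned int *vaddr = kmap_atomic_pfn(pfn);	/* temporary mapping */
	unsigned int val = vaddr[0];			/* read while mapped */

	kunmap_atomic(vaddr);		/* tear the fixmap mapping back down */
	return val;
}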

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}