linux/arch/arm/mm/highmem.c
/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:      Nicolas Pitre
 * Created:     September 8, 2008
 * Copyright:   Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
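/*
 * Usage sketch: the sleeping-context kmap()/kunmap() pairing.  The helper
 * below is hypothetical and only illustrates the API; it is not part of
 * this file.
 */
#if 0
static void example_copy_highmem_page(struct page *page, void *buf)
{
	void *vaddr = kmap(page);	/* may sleep until a pkmap slot is free */

	memcpy(buf, vaddr, PAGE_SIZE);	/* page is addressable through vaddr here */
	kunmap(page);			/* drop the pkmap reference */
}
#endif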
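/*
 * kmap_atomic() maps a page into a per-CPU fixmap slot and may be called
 * from atomic context: page faults are disabled until the matching
 * kunmap_atomic().  Lowmem pages are simply returned through their
 * permanent linear mapping, and an existing kmap_high_get() mapping is
 * reused when one is available.
 */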
void *kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
	set_top_pte(vaddr, mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
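/*
 * __kunmap_atomic() undoes a kmap_atomic()/kmap_atomic_pfn() mapping:
 * fixmap addresses have their per-CPU slot popped (and cleared when
 * CONFIG_DEBUG_HIGHMEM is set), while addresses that came from
 * kmap_high_get() are released via kunmap_high().  Page faults are
 * re-enabled before returning.
 */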
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_top_pte(vaddr, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
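/*
 * Usage sketch: the atomic-context pairing.  Callers normally go through
 * the kunmap_atomic() wrapper from <linux/highmem.h>, which ends up in
 * __kunmap_atomic() above.  The helper below is hypothetical, for
 * illustration only.
 */
#if 0
static void example_clear_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* never sleeps; pagefaults disabled */

	clear_page(vaddr);			/* keep the mapped section short and non-sleeping */
	kunmap_atomic(vaddr);			/* pops the fixmap slot, re-enables pagefaults */
}
#endif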
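/*
 * kmap_atomic_pfn() is the same as kmap_atomic() but takes a raw page
 * frame number, so it can also map physical pages that are not backed
 * by a struct page.
 */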
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
	set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));

	return (void *)vaddr;
}
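/*
 * Usage sketch: mapping by PFN when only a physical frame number is at
 * hand.  The helper below is hypothetical, for illustration only.
 */
#if 0
static void example_zero_frame(unsigned long pfn)
{
	void *vaddr = kmap_atomic_pfn(pfn);	/* atomic fixmap mapping of the frame */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* release the fixmap slot */
}
#endif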
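/*
 * kmap_atomic_to_page() translates a kernel virtual address back to its
 * struct page: addresses below FIXADDR_START are taken from the linear
 * map, fixmap addresses are resolved by reading back their PTE.
 */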
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	return pte_page(get_top_pte(vaddr));
}