   1/*
   2 * arch/arm/mm/highmem.c -- ARM highmem support
   3 *
   4 * Author:      Nicolas Pitre
   5 * Created:     september 8, 2008
   6 * Copyright:   Marvell Semiconductors Inc.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/interrupt.h>
  16#include <asm/fixmap.h>
  17#include <asm/cacheflush.h>
  18#include <asm/tlbflush.h>
  19#include "mm.h"
  20
  21void *kmap(struct page *page)
  22{
  23        might_sleep();
  24        if (!PageHighMem(page))
  25                return page_address(page);
  26        return kmap_high(page);
  27}
  28EXPORT_SYMBOL(kmap);
  29
  30void kunmap(struct page *page)
  31{
  32        BUG_ON(in_interrupt());
  33        if (!PageHighMem(page))
  34                return;
  35        kunmap_high(page);
  36}
  37EXPORT_SYMBOL(kunmap);
  38
/*
 * Atomically map a highmem page using a per-CPU fixmap slot.
 *
 * Page faults are disabled for the duration of the mapping; the caller
 * must pair this with __kunmap_atomic().  Lowmem pages are returned
 * straight from the linear mapping.  Returns the kernel virtual address
 * of the mapped page.
 */
void *__kmap_atomic(struct page *page)
{
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
        int type;

        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue when non VIVT, so force the
         * dedicated kmap usage for better debugging purposes in that case.
         */
        if (!cache_is_vivt())
                kmap = NULL;
        else
#endif
                /*
                 * If the page already has a permanent kmap mapping, reuse
                 * it (and take a reference) instead of consuming a fixmap
                 * slot; this also avoids creating a second virtual alias.
                 * Note: without CONFIG_DEBUG_HIGHMEM the "else" above is
                 * compiled out and this call is unconditional.
                 */
                kmap = kmap_high_get(page);
        if (kmap)
                return kmap;

        /* Claim the next free per-CPU atomic kmap slot. */
        type = kmap_atomic_idx_push();

        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * With debugging enabled, kunmap_atomic forces that entry to 0.
         * Make sure it was indeed properly unmapped.
         */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
        /*
         * When debugging is off, kunmap_atomic leaves the previous mapping
         * in place, so this TLB flush ensures the TLB is updated with the
         * new mapping.
         */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
  85
/*
 * Tear down a mapping established by __kmap_atomic() (or by
 * kmap_atomic_pfn()) and re-enable page faults.
 *
 * Three cases, distinguished by the address:
 *  - a fixmap address: an atomic kmap slot owned by this CPU;
 *  - a pkmap address: the mapping came from kmap_high_get(), so the
 *    permanent kmap reference is dropped instead;
 *  - anything else (lowmem): nothing to undo.
 */
void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int idx, type;

        if (kvaddr >= (void *)FIXADDR_START) {
                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

                /*
                 * VIVT caches tag lines by virtual address, so this
                 * alias must be flushed before the slot is reused;
                 * done before the pte is (possibly) cleared below.
                 */
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
                /* Debug: verify we are unmapping the slot we own, then
                 * poison it so a stale access faults immediately. */
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
                local_flush_tlb_kernel_page(vaddr);
#else
                (void) idx;  /* to kill a warning */
#endif
                kmap_atomic_idx_pop();
        } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
                /* this address was obtained through kmap_high_get() */
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
 112
/*
 * Like __kmap_atomic() but takes a raw page frame number, so it also
 * works for frames that have no struct page (e.g. device memory).
 * Disables page faults; undo with __kunmap_atomic().
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();

        /* Claim a per-CPU fixmap slot and compute its virtual address. */
        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        /* Debug builds zero the pte on unmap, so it must be empty here. */
        BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
        set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
        /* Non-debug unmap leaves the old pte in place; flush the TLB so
         * the new translation takes effect. */
        local_flush_tlb_kernel_page(vaddr);

        return (void *)vaddr;
}
 131
 132struct page *kmap_atomic_to_page(const void *ptr)
 133{
 134        unsigned long vaddr = (unsigned long)ptr;
 135        pte_t *pte;
 136
 137        if (vaddr < FIXADDR_START)
 138                return virt_to_page(ptr);
 139
 140        pte = TOP_PTE(vaddr);
 141        return pte_page(*pte);
 142}
 143