/* linux/arch/mn10300/include/asm/highmem.h
 *
 * MN10300 Virtual kernel memory mappings for high memory
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-i386/highmem.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* undef for production */
#undef HIGHMEM_DEBUG

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void __init kmap_init(void);

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE      0xfe000000UL
#define LAST_PKMAP      1024
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt)  ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

extern unsigned long kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

  49static inline unsigned long kmap(struct page *page)
  50{
  51        if (in_interrupt())
  52                BUG();
  53        if (page < highmem_start_page)
  54                return page_address(page);
  55        return kmap_high(page);
  56}
  58static inline void kunmap(struct page *page)
  59{
  60        if (in_interrupt())
  61                BUG();
  62        if (page < highmem_start_page)
  63                return;
  64        kunmap_high(page);
  65}

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
  73static inline unsigned long __kmap_atomic(struct page *page)
  74{
  75        unsigned long vaddr;
  76        int idx, type;
  77
  78        pagefault_disable();
  79        if (page < highmem_start_page)
  80                return page_address(page);
  81
  82        type = kmap_atomic_idx_push();
  83        idx = type + KM_TYPE_NR * smp_processor_id();
  84        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  85#if HIGHMEM_DEBUG
  86        if (!pte_none(*(kmap_pte - idx)))
  87                BUG();
  88#endif
  89        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
  90        local_flush_tlb_one(vaddr);
  91
  92        return vaddr;
  93}
/*
 * Tear down a mapping established by __kmap_atomic() and re-enable
 * pagefaults.  Addresses below FIXADDR_START are lowmem identity
 * mappings that were never put in a fixmap slot, so only the
 * pagefault re-enable is needed for them.
 *
 * NOTE(review): HIGHMEM_DEBUG is #undef'd above, so the #if block
 * below is compiled out; in a non-debug build 'type' is assigned but
 * otherwise unused (the idx_pop still balances the push).
 */
static inline void __kunmap_atomic(unsigned long vaddr)
{
	int type;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#if HIGHMEM_DEBUG
	{
		unsigned int idx;
		idx = type + KM_TYPE_NR * smp_processor_id();

		/* the address handed back must be this CPU's own slot */
		if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
			BUG();

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		/* NOTE(review): generic kernels spell this
		 * pte_clear(mm, addr, ptep) - confirm mn10300's pgtable.h
		 * really takes a single-argument form before enabling
		 * HIGHMEM_DEBUG.
		 */
		pte_clear(kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif

	/* release the slot claimed by kmap_atomic_idx_push() */
	kmap_atomic_idx_pop();
	pagefault_enable();
}
#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */