   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  highmem.c: virtual kernel memory mappings for high memory
   4 *
   5 *  Provides kernel-static versions of atomic kmap functions originally
   6 *  found as inlines in include/asm-sparc/highmem.h.  These became
   7 *  needed as kmap_atomic() and kunmap_atomic() started getting
   8 *  called from within modules.
   9 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
  10 *
  11 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
  12 *  modules because they are loaded with btfixup-ped functions.
  13 */
  14
  15/*
  16 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  17 * gives a more generic (and caching) interface. But kmap_atomic can
  18 * be used in IRQ contexts, so in some (very limited) cases we need it.
  19 *
  20 * XXX This is an old text. Actually, it's good to use atomic kmaps,
  21 * provided you remember that they are atomic and not try to sleep
  22 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
  23 * shared by CPUs, and so precious, and establishing them requires IPI.
  24 * Atomic kmaps are lightweight and we may have NCPUS more of them.
  25 */
  26#include <linux/highmem.h>
  27#include <linux/export.h>
  28#include <linux/mm.h>
  29
  30#include <asm/cacheflush.h>
  31#include <asm/tlbflush.h>
  32#include <asm/vaddrs.h>
  33
  34static pte_t *kmap_pte;
  35
  36void __init kmap_init(void)
  37{
  38        unsigned long address = __fix_to_virt(FIX_KMAP_BEGIN);
  39
  40        /* cache the first kmap pte */
  41        kmap_pte = virt_to_kpte(address);
  42}
  43
/*
 * Map @page into the per-CPU fixmap slot for the current atomic-kmap
 * nesting level, with page protection @prot, and return the kernel
 * virtual address of the new mapping.
 *
 * NOTE(review): presumably reached via the generic kmap_atomic_prot()
 * path with preemption already disabled — smp_processor_id() below
 * would be unstable otherwise; confirm against the caller.
 */
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	long idx, type;

	/*
	 * Push one level on the per-CPU stack of atomic-kmap slots,
	 * then turn (level, cpu) into a global fixmap slot index.
	 */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);	/* intended single-page flush, currently disabled */
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	/* The previous kunmap must have left this slot's pte empty. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	/*
	 * Slot ptes are addressed downwards from the cached kmap_pte
	 * (fixmap virtual addresses decrease as the index grows),
	 * hence the subtraction.
	 */
	set_pte(kmap_pte-idx, mk_pte(page, prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);		/* intended single-entry flush, currently disabled */
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
  74
/*
 * Tear down the atomic kmap at @kvaddr established by
 * kmap_atomic_high_prot() and pop the per-CPU kmap-slot stack.
 *
 * Without CONFIG_DEBUG_HIGHMEM the pte is deliberately left in place;
 * the next mapping of the same slot simply overwrites it.  With the
 * debug option the slot is sanity-checked, flushed and cleared so a
 * stale access Oopses instead of silently reading old data.
 */
void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/* Addresses below the fixmap area were never atomic kmaps. */
	if (vaddr < FIXADDR_START)
		return;

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		/* Recompute the slot and verify @kvaddr really is it. */
		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);	/* intended single-page flush, currently disabled */
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);		/* intended single-entry flush, currently disabled */
#else
		flush_tlb_all();
#endif
	}
#endif

	/* Release this nesting level on the per-CPU kmap-slot stack. */
	kmap_atomic_idx_pop();
}
EXPORT_SYMBOL(kunmap_atomic_high);
 116