linux/arch/csky/mm/highmem.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

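/*
 * kmap_pte is the PTE backing the first kmap_atomic fixmap slot
 * (FIX_KMAP_BEGIN); it is initialised in kmap_init(). The slot for
 * index idx lives at kmap_pte - idx, since fixmap virtual addresses
 * grow downwards as the fixmap index increases.
 */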
static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

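/*
 * Map a page for a potentially long-lived kernel access. May sleep,
 * so it cannot be used in atomic context. Lowmem pages are simply
 * returned via page_address(); highmem pages go through kmap_high()
 * and have their TLB entry flushed before use.
 */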
void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);

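/*
 * Undo a kmap(). Must not be called from interrupt context and is a
 * no-op for lowmem pages. A minimal usage sketch of the pair
 * (illustrative only, not part of this file):
 *
 *        void *va = kmap(page);
 *        memset(va, 0, PAGE_SIZE);
 *        kunmap(page);
 */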
void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

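/*
 * Atomic (non-sleeping) variant: disables preemption and page faults,
 * claims a per-CPU fixmap slot via kmap_atomic_idx_push(), installs
 * the PTE for the page and flushes the stale TLB entry. Lowmem pages
 * are again served directly from page_address().
 */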
void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
        flush_tlb_one((unsigned long)vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

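/*
 * Tear down a kmap_atomic() mapping. Addresses below FIXADDR_START
 * came straight from page_address(), so only the pagefault/preempt
 * state needs restoring. With CONFIG_DEBUG_HIGHMEM the PTE is cleared
 * and the TLB entry flushed immediately; otherwise the slot is simply
 * recycled when its index is popped.
 */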
void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int idx;

        if (vaddr < FIXADDR_START)
                goto out;

#ifdef CONFIG_DEBUG_HIGHMEM
        idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();

        BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

        pte_clear(&init_mm, vaddr, kmap_pte - idx);
        flush_tlb_one(vaddr);
#else
        (void) idx; /* to kill a warning */
#endif
        kmap_atomic_idx_pop();
out:
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

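/*
 * Minimal usage sketch for the atomic pair (illustrative only;
 * kunmap_atomic() is the <linux/highmem.h> wrapper that ends up in
 * __kunmap_atomic() above):
 *
 *        void *va = kmap_atomic(page);
 *        memcpy(dst, va + offset, len);
 *        kunmap_atomic(va);
 */
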
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void *) vaddr;
}

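/*
 * Translate an address returned by kmap_atomic() back to its struct
 * page; addresses below FIXADDR_START are plain lowmem and fall
 * through to virt_to_page().
 */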
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}

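/*
 * Pre-allocate the intermediate page tables for the kernel virtual
 * range [start, end) so that fixmap/pkmap PTEs can be installed
 * later; missing PTE pages are allocated from memblock. Only relevant
 * when CONFIG_HIGHMEM is enabled.
 */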
static void __init fixrange_init(unsigned long start, unsigned long end,
                                pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                                        if (!pte)
                                                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                                                      __func__, PAGE_SIZE,
                                                      PAGE_SIZE);

                                        set_pmd(pmd, __pmd(__pa(pte)));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

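/*
 * Build the page tables backing the fixed-address (fixmap) area and,
 * with CONFIG_HIGHMEM, the permanent kmap window, recording
 * pkmap_page_table for the generic highmem code.
 */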
void __init fixaddr_kmap_pages_init(void)
{
        unsigned long vaddr;
        pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pmd_t *pmd;
        pud_t *pud;
        pte_t *pte;
#endif
        pgd_base = swapper_pg_dir;

        /*
         * Fixed mappings:
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, 0, pgd_base);

#ifdef CONFIG_HIGHMEM
        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + __pgd_offset(vaddr);
        pud = (pud_t *)pgd;
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
#endif
}

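/*
 * Boot-time highmem setup: create the fixmap/pkmap page tables and
 * cache the PTE backing FIX_KMAP_BEGIN for the kmap_atomic() helpers.
 */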
void __init kmap_init(void)
{
        unsigned long vaddr;

        fixaddr_kmap_pages_init();

        vaddr = __fix_to_virt(FIX_KMAP_BEGIN);

        kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
}