linux/arch/x86/mm/pgtable_32.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

unsigned int __VMALLOC_RESERVE = 128 << 20;

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d)) {
		BUG();
		return;
	}
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one_kernel(vaddr);
}
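
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical caller
 * that wires one physical page into a fixed kernel virtual address, in the
 * same spirit as the fixmap code that uses set_pte_vaddr().  The names
 * example_map_page()/example_unmap_page() are made up for illustration only.
 */
static void example_map_page(unsigned long vaddr, phys_addr_t phys)
{
	/* Build a present, writable kernel PTE for the physical frame. */
	set_pte_vaddr(vaddr, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
}

static void example_unmap_page(unsigned long vaddr)
{
	/* A zero PTE is pte_none(), so set_pte_vaddr() clears the entry. */
	set_pte_vaddr(vaddr, __pte(0));
}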

unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the
 * vmalloc area - the default is 128m.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
	__VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
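
/*
 * Illustrative sketch (not part of the upstream file): what parse_vmalloc()
 * computes for a command line of "vmalloc=192m", assuming memparse()'s usual
 * k/m/g suffix handling.  example_vmalloc_reserve() is a hypothetical helper
 * for illustration only.
 */
static unsigned long example_vmalloc_reserve(void)
{
	char arg[] = "192m";
	char *end = arg;

	/* "192m" parses to 192 << 20; the guard hole is added on top of it. */
	return memparse(arg, &end) + VMALLOC_OFFSET;
}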

/*
 * reservetop=size reserves a hole at the top of the kernel address space which
 * a hypervisor can load into later.  Needed for dynamically loaded hypervisors,
 * so relocating the fixmap can be done before paging initialization.
 */
static int __init parse_reservetop(char *arg)
{
	unsigned long address;

	if (!arg)
		return -EINVAL;

	address = memparse(arg, &arg);
	reserve_top_address(address);
	early_ioremap_init();
	return 0;
}
early_param("reservetop", parse_reservetop);
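
/*
 * Illustrative sketch (not part of the upstream file): the effect of booting
 * with "reservetop=16m".  The helper name example_reservetop_effect() is made
 * up for illustration; it only spells out what parse_reservetop() above does
 * with that argument.
 */
static int __init example_reservetop_effect(void)
{
	unsigned long address = 16 << 20;	/* what memparse("16m", ...) yields */

	/* Leave the top 16 MiB free for a hypervisor; the fixmap moves down. */
	reserve_top_address(address);

	/* Early ioremap slots live in the fixmap, so rebuild them at the new top. */
	early_ioremap_init();
	return 0;
}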