linux/arch/ia64/mm/init.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + page_size(page));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
        unsigned long pfn = PHYS_PFN(paddr);

        do {
                set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
        } while (++pfn <= PHYS_PFN(paddr + size - 1));
}

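/*
 * Worked example of the loop above (illustrative numbers, not from the
 * original source; assumes 16KB pages, i.e. PAGE_SHIFT == 14): a DMA write
 * to paddr == 0x40001000 with size == 0x5000 gives PHYS_PFN(paddr) == 0x10000
 * and PHYS_PFN(paddr + size - 1) == 0x10001, so PG_arch_1 is set on both
 * pages touched by the buffer.
 */
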
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

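/*
 * Illustrative numbers (assumptions, not from the source): with a hard
 * RLIMIT_STACK of 0x20000000 (512MB, already 16-byte aligned and below
 * MAX_USER_STACK_SIZE), rbs_bot becomes
 * PAGE_ALIGN(current->mm->start_stack - 0x20000000), i.e. the register
 * backing store is anchored one full stack limit below start_stack and
 * grows upward from there via the VM_GROWSUP vma set up below.
 */
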
/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = vm_area_alloc(current->mm);
        if (vma) {
                vma_set_anonymous(vma);
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                mmap_write_lock(current->mm);
                if (insert_vm_struct(current->mm, vma)) {
                        mmap_write_unlock(current->mm);
                        vm_area_free(vma);
                        return;
                }
                mmap_write_unlock(current->mm);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = vm_area_alloc(current->mm);
                if (vma) {
                        vma_set_anonymous(vma);
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        mmap_write_lock(current->mm);
                        if (insert_vm_struct(current->mm, vma)) {
                                mmap_write_unlock(current->mm);
                                vm_area_free(vma);
                                return;
                        }
                        mmap_write_unlock(current->mm);
                }
        }
}

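/*
 * Background sketch (my reading, not part of the original source): with the
 * NaT-page attribute (_PAGE_MA_NAT) mapped at address 0, a compiler-issued
 * speculative load of a NULL pointer (e.g. an ld8.s) simply delivers a NaT'ed
 * register instead of taking a fault that would have to be intercepted and
 * deferred, which is what makes speculative NULL dereferences cheap.
 * Processes with the MMAP_PAGE_ZERO personality are skipped because they
 * expect a readable zero page at address 0 instead.
 */
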
void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *      - align up the beginning of initrd
         *      - align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

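/*
 * Worked example of the rounding in free_initrd_mem() (illustrative numbers,
 * assuming 8KB kernel pages as in the diagram above): for an initrd occupying
 * [0x7000, 0xc000), PAGE_ALIGN(0x7000) == 0x8000 and 0xc000 & PAGE_MASK ==
 * 0xc000, so the loop frees only the kernel pages at 0x8000 and 0xa000; the
 * partial page [0x6000, 0x8000), possibly shared with the end of the kernel
 * image, is left untouched.
 */
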
/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        {
                p4d = p4d_alloc(&init_mm, pgd, address);
                if (!p4d)
                        goto out;
                pud = pud_alloc(&init_mm, p4d, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once execute-only to enable
         * privilege-promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * the VMLPT.  I assume that once we run on machines big enough to warrant 64KB
         * pages, IMPL_VA_MSB will be significantly bigger, so this is unlikely to become
         * a problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots. The second half
         * of the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");


        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

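/*
 * Worked example for ia64_mmu_init() (illustrative, assuming 16KB pages and
 * IMPL_VA_MSB == 50, i.e. impl_va_bits == 51): PAGE_SHIFT == 14 and
 * pte_bits == 3 give mapped_space_bits == 3*(14-3)+14 == 47 and
 * vmlpt_bits == 51-14+3 == 40.  Both sanity checks pass
 * (47 - 14 == 33 <= 40 - 3 == 37, and 47 <= 51 - 1 == 50), the VMLPT
 * occupies the top 2^40 bytes of each region (pta == 2^61 - 2^40), and the
 * value programmed into the PTA register is pta | (40 << 2) | VHPT_ENABLE_BIT.
 */
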
int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}

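/*
 * Illustrative trace (made-up addresses): with a crash kernel reserved at
 * crashk_res == [0x04000000, 0x08000000) and a reported range of
 * [0x03000000, 0x06000000), the end falls inside the reservation and is
 * clipped back to crashk_res.start, so only [0x03000000, 0x04000000) is
 * added to memblock and the crash kernel's memory stays out of the regular
 * allocator.
 */
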
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

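/*
 * Usage sketch (not from the original source): booting with "nolwsys" on the
 * kernel command line, e.g.
 *
 *      root=/dev/sda2 console=ttyS0 nolwsys
 *
 * runs nolwsys_setup() during early parameter parsing; mem_init() below then
 * redirects every fsyscall_table entry to the heavy-weight handler.
 */
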
void __init
mem_init (void)
{
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

        /*
         * This needs to be called _after_ the command line has been parsed but
         * _before_ any drivers that may need the PCI DMA interface are
         * initialized or bootmem has been freed.
         */
        do {
#ifdef CONFIG_INTEL_IOMMU
                detect_intel_iommu();
                if (iommu_detected)
                        break;
#endif
#ifdef CONFIG_SWIOTLB
                swiotlb_init(1);
#endif
        } while (0);

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        memblock_free_all();

        /*
         * For fsyscall entrypoints with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

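/*
 * Worked example of the bit-0 marking in mem_init() (hypothetical address):
 * if sys_call_table[i] holds 0xa000000100012340 and syscall i has no
 * light-weight fsyscall implementation, fsyscall_table[i] becomes
 * 0xa000000100012341.  IA-64 code is bundle-aligned (16 bytes), so bit 0 of
 * a real handler address is always zero and the fsyscall entry code can use
 * it to tell the two kinds of handler apart.
 */
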
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
                return -EINVAL;

        ret = __add_pages(nid, start_pfn, nr_pages, params);
        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__,  ret);

        return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
}
#endif