linux/mm/sparse-vmemmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
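
/*
 * Illustration (not part of this file): with a virtual memory map based
 * at a per-arch VMEMMAP_START, the pfn/page conversions collapse into
 * pointer arithmetic, roughly:
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define page_to_pfn(page)	((unsigned long)((page) - vmemmap))
 *
 * The exact definitions live in the per-arch headers; this is only a
 * sketch of why no memory access is needed for the conversion.
 */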
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
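
/*
 * Illustrative note: callers request sizes matching the page-table
 * granularity they intend to map with. vmemmap_populate_basepages()
 * below asks for PAGE_SIZE chunks, while arches that map the memmap
 * with huge pages typically request PMD_SIZE. With 4 KiB base pages
 * (e.g. x86-64), PMD_SIZE is 2 MiB and get_order(PMD_SIZE) == 9, i.e.
 * a 512-page allocation from the buddy allocator.
 */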

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr = sparse_buffer_alloc(size);

	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
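
/*
 * Sketch of the altmap accounting (fields from struct vmem_altmap in
 * include/linux/memremap.h): a device sets aside a range of its own
 * pfns to hold the memmap, and the helpers above carve it up:
 *
 *	base_pfn
 *	|<- reserve ->|<------------------ free ------------------>|
 *	              |<- alloc ->|<- align ->|<-- still unused --->|
 *
 * vmem_altmap_next_pfn() returns the first pfn past everything handed
 * out so far; vmem_altmap_nr_free() is what remains of 'free' after
 * subtracting 'alloc' and the pfns lost to 'align' padding.
 */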

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @size:	size (in bytes) of the allocation
 * @altmap:	device page map
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
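
/*
 * Worked example of the alignment above (illustrative numbers): a
 * PMD_SIZE request with 4 KiB pages gives nr_pfns = 512, so
 * find_first_bit() yields bit 9 and the allocation is aligned to a
 * 512-pfn boundary. If the next free pfn is 0x10140, ALIGN() rounds it
 * up to 0x10200 and the 0xc0 padding pfns are charged to 'align'.
 */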

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
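
/*
 * Illustrative numbers: assuming 4 KiB pages and sizeof(struct page)
 * == 64, each pte installed above maps 4096 / 64 = 64 struct pages,
 * i.e. the memmap for 64 pfns (256 KiB of memory) per vmemmap page.
 */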

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
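
/*
 * Sketch of a minimal arch hook (not part of this file): an
 * architecture that maps the memmap with base pages only can implement
 * its vmemmap_populate() as a thin wrapper, roughly:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 *
 * Arches that can use huge mappings (e.g. PMD-sized) provide their own
 * walker instead; see the per-arch mm init code.
 */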

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;

	/*
	 * The minimum granularity of memmap extensions is
	 * PAGES_PER_SUBSECTION as allocations are tracked in the
	 * 'subsection_map' bitmap of the section.
	 */
	end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
	pfn &= PAGE_SUBSECTION_MASK;
	nr_pages = end - pfn;

	start = (unsigned long) pfn_to_page(pfn);
	end = start + nr_pages * sizeof(struct page);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
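
/*
 * Worked example of the rounding above (illustrative numbers): with
 * 4 KiB pages, PAGES_PER_SUBSECTION is 512 (a 2 MiB subsection). A
 * request for pfn = 0x10040, nr_pages = 0x80 is widened to the
 * containing subsection: pfn becomes 0x10000 and nr_pages becomes
 * 0x200, so the memmap is always populated in whole subsections.
 */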