// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

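/*
 * Allocate and reserve a block of 2^order empty zero pages;
 * ZERO_PAGE() picks one of them based on the faulting address.
 */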
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
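	/* (the block is capped at roughly 1/1024th of total memory) */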
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

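	/* mask used by ZERO_PAGE() to select a zero page within the block */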
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and enables
 * dynamic address translation (DAT).
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
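	/*
	 * A three-level kernel page table covers 4 TB of address space;
	 * switch to a four-level table beyond that.
	 */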
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
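	/* CR1/CR7/CR13 hold the primary/secondary/home space ASCEs */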
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
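	/* DMA-capable memory on s390 is the first 2 GB (31-bit addressable) */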
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

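/*
 * Called when the init phase is complete: write protect the
 * __ro_after_init section and report leftover writable+executable
 * mappings via debug_checkwx().
 */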
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

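/*
 * Protected virtualization guests have no memory encryption; instead,
 * "encrypted" and "decrypted" translate to unsharing and sharing pages
 * with the hypervisor through Ultravisor calls.
 */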
int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/* set up swiotlb bounce buffering for protected virtualization guests */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Set up guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Set up zeroed pages. */

	cmma_init_nodat();
}

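/*
 * Make the init text writable and non-executable again before the init
 * pages are poisoned and returned to the buddy allocator.
 */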
void free_initmem(void)
{
	initmem_freed = true;
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks that contain CMA regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

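/* returns -EBUSY if a CMA area intersects the range that is going offline */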
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

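/*
 * Create the identity mapping for the new range before handing it to
 * the core memory hotplug code.
 */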
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

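/* teardown happens in the reverse order of arch_add_memory() */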
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */