linux/arch/s390/mm/init.c
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

static void __init setup_zero_pages(void)
{
        unsigned int order;
        struct page *page;
        int i;

        /* Latest machines require a mapping granularity of 512KB */
        order = 7;

        /* Limit number of empty zero pages for small memory sizes */
        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
                order--;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

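        /* Split the high-order block into individual pages and mark each
         * page reserved so the zero pages are never freed or migrated. */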
        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                mark_page_reserved(page);
                page++;
        }

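        /*
         * zero_page_mask is used by ZERO_PAGE() to pick one of the 1 << order
         * zero pages based on the faulting virtual address, so that read
         * accesses to the zero page are spread across the whole block.
         */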
        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;

        init_mm.pgd = swapper_pg_dir;
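        /*
         * A region-third table covers 4TB (2^42 bytes) of address space.
         * If the vmalloc area ends above that, start with a region-second
         * table instead, which covers 8PB (2^53 bytes).
         */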
        if (VMALLOC_END > (1UL << 42)) {
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
        init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.kernel_asce = init_mm.context.asce;
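        /* Fill all 2048 entries of the top-level region table (16KB) with
         * the empty entry type selected above. */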
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();

        /* enable virtual mapping in kernel mode */
  99        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
 100        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
 101        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
 102        __arch_local_irq_stosm(0x04);
 103
 104        sparse_memory_present_with_active_regions(MAX_NUMNODES);
 105        sparse_init();
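        /*
         * ZONE_DMA covers memory below 2GB, the limit for 31-bit addressable
         * I/O; everything above that goes into ZONE_NORMAL.
         */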
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}

void mark_rodata_ro(void)
{
        /* Text and rodata are already protected. Nothing to do here. */
        pr_info("Write protecting the kernel read-only data: %luk\n",
                ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
}

void __init mem_init(void)
{
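        /*
         * Attach init_mm to CPU 0. On machines with the local-TLB-clearing
         * facility, cpu_attach_mask tracks which CPUs may hold TLB entries
         * for this mm so that flushes can be done locally when possible.
         */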
        if (MACHINE_HAS_TLB_LC)
                cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(0, mm_cpumask(&init_mm));
        atomic_set(&init_mm.context.attach_count, 1);

        set_max_mapnr(max_low_pfn);
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* Setup guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages. */

        mem_init_print_info(NULL);
}

void free_initmem(void)
{
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
        unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        unsigned long nr_pages;
        int rc, zone_enum;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;

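        /*
         * Split the new range along the zone boundaries and add each piece
         * to the zone it falls into: ZONE_DMA below 2GB, ZONE_NORMAL up to
         * the end of memory present at boot, ZONE_MOVABLE above that.
         */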
        while (size_pages > 0) {
                if (start_pfn < dma_end_pfn) {
                        nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
                                   dma_end_pfn - start_pfn : size_pages;
                        zone_enum = ZONE_DMA;
                } else if (start_pfn < normal_end_pfn) {
                        nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
                                   normal_end_pfn - start_pfn : size_pages;
                        zone_enum = ZONE_NORMAL;
                } else {
                        nr_pages = size_pages;
                        zone_enum = ZONE_MOVABLE;
                }
                rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
                                 start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
        }
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}

unsigned long memory_block_size_bytes(void)
{
        /*
         * Make sure the memory block size is always greater than or
         * equal to the memory increment size.
         */
        return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        /*
         * There is no hardware or firmware interface which could trigger a
         * hot memory remove on s390. So there is nothing that needs to be
         * implemented.
         */
        return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */