linux/arch/s390/mm/init.c
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

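/*
 * Allocate a contiguous block of zeroed pages, mark them reserved and
 * set up zero_page_mask so that ZERO_PAGE(vaddr) can hand out one of
 * several zero pages depending on the given address.
 */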
static void __init setup_zero_pages(void)
{
        unsigned int order;
        struct page *page;
        int i;

        /* Latest machines require a mapping granularity of 512KB */
        order = 7;

        /* Limit number of empty zero pages for small memory sizes */
        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
                order--;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                mark_page_reserved(page);
                page++;
        }

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables, loads the kernel ASCE
 * into the control registers and enables DAT.
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;

        init_mm.pgd = swapper_pg_dir;
        if (VMALLOC_END > (1UL << 42)) {
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
        init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.kernel_asce = init_mm.context.asce;
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();

        /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        __arch_local_irq_stosm(0x04);

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}

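/*
 * Write-protect the data placed in the __ro_after_init section now that
 * it is no longer supposed to be modified.
 */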
void mark_rodata_ro(void)
{
        unsigned long size = __end_ro_after_init - __start_ro_after_init;

        set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
        pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

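/*
 * Finish setting up memory management: release the boot memory to the
 * page allocator and set up the empty zero pages.
 */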
void __init mem_init(void)
{
        cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(0, mm_cpumask(&init_mm));

        set_max_mapnr(max_low_pfn);
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        /* Set up guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();
        setup_zero_pages();     /* Set up zeroed pages. */

        mem_init_print_info(NULL);
}

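/*
 * Make the init text writable and non-executable again before the init
 * sections are poisoned and released to the page allocator.
 */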
void free_initmem(void)
{
        __set_memory((unsigned long) _sinittext,
                     (_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RW | SET_MEMORY_NX);
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

unsigned long memory_block_size_bytes(void)
{
        /*
         * Make sure the memory block size is always greater than or
         * equal to the memory increment size.
         */
        return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG
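/*
 * Add a hot-plugged memory range: create the kernel mapping for it and
 * then add its pages zone by zone, filling existing zones up to their
 * current boundaries and putting the remainder into ZONE_MOVABLE.
 */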
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        pg_data_t *pgdat = NODE_DATA(nid);
        struct zone *zone;
        int rc, i;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;

        for (i = 0; i < MAX_NR_ZONES; i++) {
                zone = pgdat->node_zones + i;
                if (zone_idx(zone) != ZONE_MOVABLE) {
                        /* Add range within existing zone limits, if possible */
                        zone_start_pfn = zone->zone_start_pfn;
                        zone_end_pfn = zone->zone_start_pfn +
                                       zone->spanned_pages;
                } else {
                        /* Add remaining range to ZONE_MOVABLE */
                        zone_start_pfn = start_pfn;
                        zone_end_pfn = start_pfn + size_pages;
                }
                if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
                        continue;
                nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
                           zone_end_pfn - start_pfn : size_pages;
                rc = __add_pages(nid, zone, start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
                if (!size_pages)
                        break;
        }
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        /*
         * There is no hardware or firmware interface which could trigger a
         * hot memory remove on s390. So there is nothing that needs to be
         * implemented.
         */
        return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */