linux/arch/ia64/mm/contig.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * up front so that the APs never need to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
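
/*
 * Illustrative sketch (not part of this file): how the offsets recorded
 * by per_cpu_init() are consumed.  per_cpu(var, cpu) essentially adds
 * __per_cpu_offset[cpu] to the link-time address of the variable, which
 * lands inside the PERCPU_PAGE_SIZE area copied for that cpu by the loop
 * above.  The variable name my_counter is hypothetical:
 *
 *	DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	unsigned long v = per_cpu(my_counter, cpu);
 *	unsigned long v2 = *(unsigned long *)((char *)&my_counter +
 *					      __per_cpu_offset[cpu]);
 *
 * On this configuration v and v2 read the same per-cpu slot.
 */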

static inline void
alloc_per_cpu_data(void)
{
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
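
/*
 * Illustrative sketch (not part of this file): the allocation above is one
 * contiguous block of num_possible_cpus() units, each PERCPU_PAGE_SIZE
 * bytes, so the area that per_cpu_init() hands to cpu N starts at the
 * address computed by this hypothetical helper:
 *
 *	static void *area_for(void *base, unsigned int n)
 *	{
 *		return base + n * PERCPU_PAGE_SIZE;
 *	}
 *
 * which matches the "cpu_data += PERCPU_PAGE_SIZE" stride in the loop above.
 */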

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
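
/*
 * Illustrative sketch (not part of this file): the layout taught to the
 * percpu allocator above.  Each cpu's unit is exactly one PERCPU_PAGE_SIZE
 * chunk, split as:
 *
 *	PERCPU_PAGE_SIZE == static_size + reserved_size + dyn_size
 *
 *	| static (.data..percpu image) | reserved (modules) | dynamic |
 *
 * The dyn_size < 0 check above is precisely the assertion that the static
 * and module-reserved parts still fit inside a single unit.
 */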
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
#else
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
#endif

	find_initrd();

	alloc_per_cpu_data();
}
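
/*
 * Illustrative sketch (not part of this file): the shape of an
 * efi_memmap_walk() callback such as find_max_min_low_pfn above.  On ia64
 * the walker invokes the callback once per usable memory range and stops
 * early on a negative return.  The name count_free_pages and its counter
 * argument are hypothetical:
 *
 *	static int __init count_free_pages(u64 start, u64 end, void *arg)
 *	{
 *		unsigned long *count = arg;
 *
 *		*count += (end - start) >> PAGE_SHIFT;
 *		return 0;
 *	}
 *
 *	efi_memmap_walk(count_free_pages, &nr_free);
 */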

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
	free_area_init(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
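
/*
 * Illustrative sketch (not part of this file): what the virtual mem_map
 * buys.  With CONFIG_VIRTUAL_MEM_MAP, ia64 historically defined the
 * pfn <-> page conversions directly against vmem_map, roughly:
 *
 *	pfn_to_page(pfn)  == vmem_map + (pfn)
 *	page_to_pfn(page) == (page) - vmem_map
 *
 * so the struct page array is indexed as if memory were contiguous, while
 * create_mem_map_page_table() above only populates page-table entries for
 * ranges that actually exist; large holes cost no backing memory.
 */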