linux/arch/ia64/mm/contig.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif
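
/*
 * max_gap records the largest hole found in the EFI memory map (see the
 * find_largest_hole() walk in paging_init() below).  When it exceeds
 * LARGE_GAP, a flat mem_map would waste too much space covering the hole,
 * so a virtually mapped mem_map (vmem_map) is used instead.
 */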

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	printk(KERN_INFO "Node memory in pages:\n");
	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;
		int nid = pgdat->node_id;

		if (skip_free_areas_node(filter, nid))
			continue;
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
				i = vmemmap_find_next_valid_pfn(nid, i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", nid,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}
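
/*
 * show_mem() is normally reached from generic code, e.g. the SysRq
 * memory-dump handler or the OOM killer, rather than called directly.
 * The filter argument is a SHOW_MEM_FILTER_* bitmask; roughly, the two
 * common call sites look like:
 *
 *	show_mem(0);			  (dump every online node)
 *	show_mem(SHOW_MEM_FILTER_NODES);  (skip nodes outside the caller's
 *					   cpuset, cf. skip_free_areas_node())
 */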


/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and record its starting address in
 * bootmap_start.  This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue; /* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}
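
/*
 * The callback above is driven by efi_memmap_walk() from find_memory()
 * below.  The walker invokes it once per usable memory descriptor and
 * stops as soon as the callback returns a negative value, hence -1 for
 * "found a fit" and 0 for "keep looking".  In essence:
 *
 *	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
 *	bootmap_start = ~0UL;
 *	efi_memmap_walk(find_bootmap_location, &bootmap_size);
 */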

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - set up per-cpu variables
 *
 * Allocate and set up per-cpu data areas.
 */
void * __cpuinit
per_cpu_init (void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP therefore allocates PERCPU_PAGE_SIZE bytes for each
	 * possible CPU up front, so that the APs never have to call
	 * get_zeroed_page() themselves.
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved out of the __init
		 * area, which is set up by head.S and used until this
		 * point.  Update ar.k3 accordingly.  This move ensures
		 * that cpu0's percpu area lands on the correct node and
		 * that its virtual address isn't insanely far from the
		 * other percpu areas, which matters to the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
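
/*
 * The offsets recorded above are what the generic per_cpu() accessors
 * add to a static percpu symbol's link-time address, roughly:
 *
 *	address of var on cpu == &var + __per_cpu_offset[cpu]
 *
 * so the back-to-back PERCPU_PAGE_SIZE copies in cpu_data are all
 * reachable through the single set of symbols in the percpu section.
 */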

static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - set up percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
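
/*
 * The first-chunk layout taught to the allocator above is, per unit
 * (one PERCPU_PAGE_SIZE unit per possible CPU):
 *
 *	| static | reserved (PERCPU_MODULE_RESERVE) | dynamic |
 *
 * The reserved region backs static percpu variables in modules; the
 * remainder of the page feeds the dynamic percpu allocator.
 */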
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - set up the memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}
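
/*
 * From here on the bootmem allocator is live: early boot code may use
 * __alloc_bootmem() (as alloc_per_cpu_data() does above) until mem_init()
 * hands the surviving pages over to the page allocator.
 */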

static int count_pages(u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
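
/*
 * With CONFIG_VIRTUAL_MEM_MAP the mem_map is a virtually mapped array
 * carved out just below VMALLOC_END, so the pfn-to-page translation is
 * essentially:
 *
 *	page = vmem_map + pfn;
 *
 * with pfn_valid() probing whether that part of the array was actually
 * populated, since entries covering holes in the physical map never get
 * backing pages.
 */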