   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/arch/parisc/mm/init.c
   4 *
   5 *  Copyright (C) 1995  Linus Torvalds
   6 *  Copyright 1999 SuSE GmbH
   7 *    changed by Philipp Rumpf
   8 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
   9 *  Copyright 2004 Randolph Chung (tausq@debian.org)
  10 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
  11 *
  12 */
  13
  14
  15#include <linux/module.h>
  16#include <linux/mm.h>
  17#include <linux/memblock.h>
  18#include <linux/gfp.h>
  19#include <linux/delay.h>
  20#include <linux/init.h>
  21#include <linux/initrd.h>
  22#include <linux/swap.h>
  23#include <linux/unistd.h>
  24#include <linux/nodemask.h>     /* for node_online_map */
  25#include <linux/pagemap.h>      /* for release_pages */
  26#include <linux/compat.h>
  27
  28#include <asm/pgalloc.h>
  29#include <asm/tlb.h>
  30#include <asm/pdc_chassis.h>
  31#include <asm/mmzone.h>
  32#include <asm/sections.h>
  33#include <asm/msgbuf.h>
  34#include <asm/sparsemem.h>
  35
  36extern int  data_start;
  37extern void parisc_kernel_start(void);  /* Kernel entry point in head.S */
  38
  39#if CONFIG_PGTABLE_LEVELS == 3
  40/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
  41 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
  42 * guarantee that global objects will be laid out in memory in the same order
  43 * as the order of declaration, so put these in different sections and use
  44 * the linker script to order them. */
  45pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
  46#endif
  47
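/*
 * swapper_pg_dir is the kernel's reference top-level page directory.  pg0
 * provides the pte pages for the initial kernel mapping set up in head.S,
 * which covers the first KERNEL_INITIAL_SIZE bytes of the kernel (see the
 * memblock_set_bottom_up() comment in setup_bootmem() below).
 */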
  48pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
  49pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
  50
  51static struct resource data_resource = {
  52        .name   = "Kernel data",
  53        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  54};
  55
  56static struct resource code_resource = {
  57        .name   = "Kernel code",
  58        .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  59};
  60
  61static struct resource pdcdata_resource = {
  62        .name   = "PDC data (Page Zero)",
  63        .start  = 0,
  64        .end    = 0x9ff,
  65        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
  66};
  67
  68static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
  69
   70/* The following array is initialized from the firmware-specific
  71 * information retrieved in kernel/inventory.c.
  72 */
  73
  74physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
  75int npmem_ranges __initdata;
  76
  77#ifdef CONFIG_64BIT
  78#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
  79#else /* !CONFIG_64BIT */
  80#define MAX_MEM         (3584U*1024U*1024U)
  81#endif /* !CONFIG_64BIT */
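/*
 * On 64-bit kernels the only hard limit is the physical address width
 * (2^MAX_PHYSMEM_BITS bytes).  The 32-bit limit of 3584 MB (0xe0000000)
 * leaves the top 512 MB of the 32-bit address space for other purposes
 * (see the comment in setup_bootmem() below).
 */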
  82
  83static unsigned long mem_limit __read_mostly = MAX_MEM;
  84
  85static void __init mem_limit_func(void)
  86{
  87        char *cp, *end;
  88        unsigned long limit;
  89
  90        /* We need this before __setup() functions are called */
  91
  92        limit = MAX_MEM;
  93        for (cp = boot_command_line; *cp; ) {
  94                if (memcmp(cp, "mem=", 4) == 0) {
  95                        cp += 4;
  96                        limit = memparse(cp, &end);
  97                        if (end != cp)
  98                                break;
  99                        cp = end;
 100                } else {
 101                        while (*cp != ' ' && *cp)
 102                                ++cp;
 103                        while (*cp == ' ')
 104                                ++cp;
 105                }
 106        }
 107
 108        if (limit < mem_limit)
 109                mem_limit = limit;
 110}
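/*
 * For example, booting with "console=ttyS0 mem=768M" makes memparse()
 * return 768 << 20, so mem_limit is capped at 0x30000000 and the loop in
 * setup_bootmem() below truncates the usable memory to 768 MB.  memparse()
 * accepts the usual K/M/G suffixes.
 */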
 111
 112#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
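/*
 * MAX_GAP is 1 GB worth of pages: with the default 4 kB page size this is
 * 0x40000000 >> 12 == 262144 pages.  Without SPARSEMEM, a range that starts
 * more than MAX_GAP pages beyond the end of the previous range causes that
 * range and all following ones to be dropped (see below).
 */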
 113
 114static void __init setup_bootmem(void)
 115{
 116        unsigned long mem_max;
 117#ifndef CONFIG_SPARSEMEM
 118        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
 119        int npmem_holes;
 120#endif
 121        int i, sysram_resource_count;
 122
 123        disable_sr_hashing(); /* Turn off space register hashing */
 124
 125        /*
 126         * Sort the ranges. Since the number of ranges is typically
 127         * small, and performance is not an issue here, just do
 128         * a simple insertion sort.
 129         */
 130
 131        for (i = 1; i < npmem_ranges; i++) {
 132                int j;
 133
 134                for (j = i; j > 0; j--) {
 135                        physmem_range_t tmp;
 136
 137                        if (pmem_ranges[j-1].start_pfn <
 138                            pmem_ranges[j].start_pfn) {
 139
 140                                break;
 141                        }
 142                        tmp = pmem_ranges[j-1];
 143                        pmem_ranges[j-1] = pmem_ranges[j];
 144                        pmem_ranges[j] = tmp;
 145                }
 146        }
 147
 148#ifndef CONFIG_SPARSEMEM
 149        /*
 150         * Throw out ranges that are too far apart (controlled by
 151         * MAX_GAP).
 152         */
 153
 154        for (i = 1; i < npmem_ranges; i++) {
 155                if (pmem_ranges[i].start_pfn -
 156                        (pmem_ranges[i-1].start_pfn +
 157                         pmem_ranges[i-1].pages) > MAX_GAP) {
 158                        npmem_ranges = i;
 159                        printk("Large gap in memory detected (%ld pages). "
 160                               "Consider turning on CONFIG_SPARSEMEM\n",
 161                               pmem_ranges[i].start_pfn -
 162                               (pmem_ranges[i-1].start_pfn +
 163                                pmem_ranges[i-1].pages));
 164                        break;
 165                }
 166        }
 167#endif
 168
 169        /* Print the memory ranges */
 170        pr_info("Memory Ranges:\n");
 171
 172        for (i = 0; i < npmem_ranges; i++) {
 173                struct resource *res = &sysram_resources[i];
 174                unsigned long start;
 175                unsigned long size;
 176
 177                size = (pmem_ranges[i].pages << PAGE_SHIFT);
 178                start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
 179                pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
 180                        i, start, start + (size - 1), size >> 20);
 181
 182                /* request memory resource */
 183                res->name = "System RAM";
 184                res->start = start;
 185                res->end = start + size - 1;
 186                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 187                request_resource(&iomem_resource, res);
 188        }
 189
 190        sysram_resource_count = npmem_ranges;
 191
 192        /*
 193         * For 32 bit kernels we limit the amount of memory we can
 194         * support, in order to preserve enough kernel address space
 195         * for other purposes. For 64 bit kernels we don't normally
 196         * limit the memory, but this mechanism can be used to
 197         * artificially limit the amount of memory (and it is written
 198         * to work with multiple memory ranges).
 199         */
 200
 201        mem_limit_func();       /* check for "mem=" argument */
 202
 203        mem_max = 0;
 204        for (i = 0; i < npmem_ranges; i++) {
 205                unsigned long rsize;
 206
 207                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
 208                if ((mem_max + rsize) > mem_limit) {
 209                        printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
 210                        if (mem_max == mem_limit)
 211                                npmem_ranges = i;
 212                        else {
 213                                pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
 214                                                       - (mem_max >> PAGE_SHIFT);
 215                                npmem_ranges = i + 1;
 216                                mem_max = mem_limit;
 217                        }
 218                        break;
 219                }
 220                mem_max += rsize;
 221        }
 222
  223        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);
 224
 225#ifndef CONFIG_SPARSEMEM
 226        /* Merge the ranges, keeping track of the holes */
 227        {
 228                unsigned long end_pfn;
 229                unsigned long hole_pages;
 230
 231                npmem_holes = 0;
 232                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
 233                for (i = 1; i < npmem_ranges; i++) {
 234
 235                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
 236                        if (hole_pages) {
 237                                pmem_holes[npmem_holes].start_pfn = end_pfn;
 238                                pmem_holes[npmem_holes++].pages = hole_pages;
 239                                end_pfn += hole_pages;
 240                        }
 241                        end_pfn += pmem_ranges[i].pages;
 242                }
 243
 244                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
 245                npmem_ranges = 1;
 246        }
 247#endif
 248
 249        /*
 250         * Initialize and free the full range of memory in each range.
 251         */
 252
 253        max_pfn = 0;
 254        for (i = 0; i < npmem_ranges; i++) {
 255                unsigned long start_pfn;
 256                unsigned long npages;
 257                unsigned long start;
 258                unsigned long size;
 259
 260                start_pfn = pmem_ranges[i].start_pfn;
 261                npages = pmem_ranges[i].pages;
 262
 263                start = start_pfn << PAGE_SHIFT;
 264                size = npages << PAGE_SHIFT;
 265
 266                /* add system RAM memblock */
 267                memblock_add(start, size);
 268
 269                if ((start_pfn + npages) > max_pfn)
 270                        max_pfn = start_pfn + npages;
 271        }
 272
 273        /*
 274         * We can't use memblock top-down allocations because we only
 275         * created the initial mapping up to KERNEL_INITIAL_SIZE in
 276         * the assembly bootup code.
 277         */
 278        memblock_set_bottom_up(true);
 279
 280        /* IOMMU is always used to access "high mem" on those boxes
 281         * that can support enough mem that a PCI device couldn't
  282         * directly DMA to all physical addresses.
 283         * ISA DMA support will need to revisit this.
 284         */
 285        max_low_pfn = max_pfn;
 286
 287        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
 288
 289#define PDC_CONSOLE_IO_IODC_SIZE 32768
 290
 291        memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
 292                                PDC_CONSOLE_IO_IODC_SIZE));
 293        memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
 294                        (unsigned long)(_end - KERNEL_BINARY_TEXT_START));
 295
 296#ifndef CONFIG_SPARSEMEM
 297
 298        /* reserve the holes */
 299
 300        for (i = 0; i < npmem_holes; i++) {
 301                memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
 302                                (pmem_holes[i].pages << PAGE_SHIFT));
 303        }
 304#endif
 305
 306#ifdef CONFIG_BLK_DEV_INITRD
 307        if (initrd_start) {
 308                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
 309                if (__pa(initrd_start) < mem_max) {
 310                        unsigned long initrd_reserve;
 311
 312                        if (__pa(initrd_end) > mem_max) {
 313                                initrd_reserve = mem_max - __pa(initrd_start);
 314                        } else {
 315                                initrd_reserve = initrd_end - initrd_start;
 316                        }
 317                        initrd_below_start_ok = 1;
 318                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
 319
 320                        memblock_reserve(__pa(initrd_start), initrd_reserve);
 321                }
 322        }
 323#endif
 324
 325        data_resource.start =  virt_to_phys(&data_start);
 326        data_resource.end = virt_to_phys(_end) - 1;
 327        code_resource.start = virt_to_phys(_text);
 328        code_resource.end = virt_to_phys(&data_start)-1;
 329
 330        /* We don't know which region the kernel will be in, so try
 331         * all of them.
 332         */
 333        for (i = 0; i < sysram_resource_count; i++) {
 334                struct resource *res = &sysram_resources[i];
 335                request_resource(res, &code_resource);
 336                request_resource(res, &data_resource);
 337        }
 338        request_resource(&sysram_resources[0], &pdcdata_resource);
 339
 340        /* Initialize Page Deallocation Table (PDT) and check for bad memory. */
 341        pdc_pdt_init();
 342
 343        memblock_allow_resize();
 344        memblock_dump_all();
 345}
 346
 347static bool kernel_set_to_readonly;
 348
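/*
 * map_pages() fills in kernel page table entries so that the virtual range
 * [start_vaddr, start_vaddr + size) maps the physical range starting at
 * start_paddr, allocating intermediate pmd/pte pages from memblock as
 * needed.  With force set, every page gets the supplied pgprot.  Otherwise
 * the protection is chosen per page: addresses outside __init_begin.._end
 * get PAGE_KERNEL; while kernel_set_to_readonly is still false the kernel
 * area stays PAGE_KERNEL_RWX; after that the _text..data_start region
 * becomes PAGE_KERNEL_EXEC and the rest PAGE_KERNEL.  Most kernel pages
 * are also marked huge so they can be backed by large TLB entries.
 */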
 349static void __init map_pages(unsigned long start_vaddr,
 350                             unsigned long start_paddr, unsigned long size,
 351                             pgprot_t pgprot, int force)
 352{
 353        pmd_t *pmd;
 354        pte_t *pg_table;
 355        unsigned long end_paddr;
 356        unsigned long start_pmd;
 357        unsigned long start_pte;
 358        unsigned long tmp1;
 359        unsigned long tmp2;
 360        unsigned long address;
 361        unsigned long vaddr;
 362        unsigned long ro_start;
 363        unsigned long ro_end;
 364        unsigned long kernel_start, kernel_end;
 365
 366        ro_start = __pa((unsigned long)_text);
 367        ro_end   = __pa((unsigned long)&data_start);
 368        kernel_start = __pa((unsigned long)&__init_begin);
 369        kernel_end  = __pa((unsigned long)&_end);
 370
 371        end_paddr = start_paddr + size;
 372
  373        /* for a 2-level configuration the pmd is folded (PTRS_PER_PMD == 1), so start_pmd will be 0 */
 374        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
 375        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 376
 377        address = start_paddr;
 378        vaddr = start_vaddr;
 379        while (address < end_paddr) {
 380                pgd_t *pgd = pgd_offset_k(vaddr);
 381                p4d_t *p4d = p4d_offset(pgd, vaddr);
 382                pud_t *pud = pud_offset(p4d, vaddr);
 383
 384#if CONFIG_PGTABLE_LEVELS == 3
 385                if (pud_none(*pud)) {
 386                        pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
 387                                             PAGE_SIZE << PMD_ORDER);
 388                        if (!pmd)
 389                                panic("pmd allocation failed.\n");
 390                        pud_populate(NULL, pud, pmd);
 391                }
 392#endif
 393
 394                pmd = pmd_offset(pud, vaddr);
 395                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
 396                        if (pmd_none(*pmd)) {
 397                                pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 398                                if (!pg_table)
 399                                        panic("page table allocation failed\n");
 400                                pmd_populate_kernel(NULL, pmd, pg_table);
 401                        }
 402
 403                        pg_table = pte_offset_kernel(pmd, vaddr);
 404                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 405                                pte_t pte;
 406                                pgprot_t prot;
 407                                bool huge = false;
 408
 409                                if (force) {
 410                                        prot = pgprot;
 411                                } else if (address < kernel_start || address >= kernel_end) {
 412                                        /* outside kernel memory */
 413                                        prot = PAGE_KERNEL;
 414                                } else if (!kernel_set_to_readonly) {
 415                                        /* still initializing, allow writing to RO memory */
 416                                        prot = PAGE_KERNEL_RWX;
 417                                        huge = true;
 418                                } else if (address >= ro_start) {
 419                                        /* Code (ro) and Data areas */
 420                                        prot = (address < ro_end) ?
 421                                                PAGE_KERNEL_EXEC : PAGE_KERNEL;
 422                                        huge = true;
 423                                } else {
 424                                        prot = PAGE_KERNEL;
 425                                }
 426
 427                                pte = __mk_pte(address, prot);
 428                                if (huge)
 429                                        pte = pte_mkhuge(pte);
 430
 431                                if (address >= end_paddr)
 432                                        break;
 433
 434                                set_pte(pg_table, pte);
 435
 436                                address += PAGE_SIZE;
 437                                vaddr += PAGE_SIZE;
 438                        }
 439                        start_pte = 0;
 440
 441                        if (address >= end_paddr)
 442                            break;
 443                }
 444                start_pmd = 0;
 445        }
 446}
 447
 448void __init set_kernel_text_rw(int enable_read_write)
 449{
 450        unsigned long start = (unsigned long) __init_begin;
 451        unsigned long end   = (unsigned long) &data_start;
 452
 453        map_pages(start, __pa(start), end-start,
 454                PAGE_KERNEL_RWX, enable_read_write ? 1:0);
 455
 456        /* force the kernel to see the new page table entries */
 457        flush_cache_all();
 458        flush_tlb_all();
 459}
 460
 461void __ref free_initmem(void)
 462{
 463        unsigned long init_begin = (unsigned long)__init_begin;
 464        unsigned long init_end = (unsigned long)__init_end;
 465        unsigned long kernel_end  = (unsigned long)&_end;
 466
 467        /* Remap kernel text and data, but do not touch init section yet. */
 468        kernel_set_to_readonly = true;
 469        map_pages(init_end, __pa(init_end), kernel_end - init_end,
 470                  PAGE_KERNEL, 0);
 471
 472        /* The init text pages are marked R-X.  We have to
 473         * flush the icache and mark them RW-
 474         *
  475         * This is tricky, because map_pages is in the init section.
  476         * Remap the init area while it is still executable
  477         * (PAGE_KERNEL_RWX) first, to pull in the TLB entries needed
  478         * to keep running map_pages */
 479        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
 480                  PAGE_KERNEL_RWX, 1);
 481        /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
 482         * map_pages */
 483        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
 484                  PAGE_KERNEL, 1);
 485
 486        /* force the kernel to see the new TLB entries */
 487        __flush_tlb_range(0, init_begin, kernel_end);
 488
 489        /* finally dump all the instructions which were cached, since the
  490         * pages are no longer executable */
  491        flush_icache_range(init_begin, init_end);
  492
 493        free_initmem_default(POISON_FREE_INITMEM);
 494
  495        /* set up a new LED state on systems shipped with an LED State panel */
 496        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
 497}
 498
 499
 500#ifdef CONFIG_STRICT_KERNEL_RWX
 501void mark_rodata_ro(void)
 502{
 503        /* rodata memory was already mapped with KERNEL_RO access rights by
 504           pagetable_init() and map_pages(). No need to do additional stuff here */
 505        unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
 506
 507        pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
 508}
 509#endif
 510
 511
 512/*
 513 * Just an arbitrary offset to serve as a "hole" between mapping areas
 514 * (between top of physical memory and a potential pcxl dma mapping
 515 * area, and below the vmalloc mapping area).
 516 *
 517 * The current 32K value just means that there will be a 32K "hole"
  518 * between mapping areas. That means that any out-of-bounds memory
  519 * accesses will hopefully be caught. The vmalloc() routines leave
 520 * a hole of 4kB between each vmalloced area for the same reason.
 521 */
 522
 523 /* Leave room for gateway page expansion */
 524#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
 525#error KERNEL_MAP_START is in gateway reserved region
 526#endif
 527#define MAP_START (KERNEL_MAP_START)
 528
 529#define VM_MAP_OFFSET  (32*1024)
 530#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
 531                                     & ~(VM_MAP_OFFSET-1)))
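/*
 * SET_MAP_OFFSET() rounds its argument up to the next 32 kB boundary that
 * is strictly above it, e.g. SET_MAP_OFFSET(0x10000000) == 0x10008000 and
 * SET_MAP_OFFSET(0x10001000) == 0x10008000 (illustrative values), which is
 * what creates the hole described above.
 */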
 532
 533void *parisc_vmalloc_start __ro_after_init;
 534EXPORT_SYMBOL(parisc_vmalloc_start);
 535
 536#ifdef CONFIG_PA11
 537unsigned long pcxl_dma_start __ro_after_init;
 538#endif
 539
 540void __init mem_init(void)
 541{
 542        /* Do sanity checks on IPC (compat) structures */
 543        BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
 544#ifndef CONFIG_64BIT
 545        BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
 546        BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
 547        BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
 548#endif
 549#ifdef CONFIG_COMPAT
 550        BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
 551        BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
 552        BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
 553        BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
 554#endif
 555
 556        /* Do sanity checks on page table constants */
 557        BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
 558        BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
 559        BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
 560        BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
 561                        > BITS_PER_LONG);
 562
 563        high_memory = __va((max_pfn << PAGE_SHIFT));
 564        set_max_mapnr(max_low_pfn);
 565        memblock_free_all();
 566
 567#ifdef CONFIG_PA11
 568        if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
 569                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
 570                parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
 571                                                + PCXL_DMA_MAP_SIZE);
 572        } else
 573#endif
 574                parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
 575
 576        mem_init_print_info(NULL);
 577
 578#if 0
 579        /*
 580         * Do not expose the virtual kernel memory layout to userspace.
 581         * But keep code for debugging purposes.
 582         */
 583        printk("virtual kernel memory layout:\n"
 584               "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
 585               "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
 586               "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
 587               "       .init : 0x%px - 0x%px   (%4ld kB)\n"
 588               "       .data : 0x%px - 0x%px   (%4ld kB)\n"
 589               "       .text : 0x%px - 0x%px   (%4ld kB)\n",
 590
 591               (void*)VMALLOC_START, (void*)VMALLOC_END,
 592               (VMALLOC_END - VMALLOC_START) >> 20,
 593
 594               (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
 595               (unsigned long)(FIXMAP_SIZE / 1024),
 596
 597               __va(0), high_memory,
 598               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
 599
 600               __init_begin, __init_end,
 601               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
 602
 603               _etext, _edata,
 604               ((unsigned long)_edata - (unsigned long)_etext) >> 10,
 605
 606               _text, _etext,
 607               ((unsigned long)_etext - (unsigned long)_text) >> 10);
 608#endif
 609}
 610
 611unsigned long *empty_zero_page __ro_after_init;
 612EXPORT_SYMBOL(empty_zero_page);
 613
 614/*
 615 * pagetable_init() sets up the page tables
 616 *
 617 * Note that gateway_init() places the Linux gateway page at page 0.
 618 * Since gateway pages cannot be dereferenced this has the desirable
 619 * side effect of trapping those pesky NULL-reference errors in the
 620 * kernel.
 621 */
 622static void __init pagetable_init(void)
 623{
 624        int range;
 625
 626        /* Map each physical memory range to its kernel vaddr */
 627
 628        for (range = 0; range < npmem_ranges; range++) {
 629                unsigned long start_paddr;
 630                unsigned long end_paddr;
 631                unsigned long size;
 632
 633                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
 634                size = pmem_ranges[range].pages << PAGE_SHIFT;
 635                end_paddr = start_paddr + size;
 636
 637                map_pages((unsigned long)__va(start_paddr), start_paddr,
 638                          size, PAGE_KERNEL, 0);
 639        }
 640
 641#ifdef CONFIG_BLK_DEV_INITRD
 642        if (initrd_end && initrd_end > mem_limit) {
 643                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
 644                map_pages(initrd_start, __pa(initrd_start),
 645                          initrd_end - initrd_start, PAGE_KERNEL, 0);
 646        }
 647#endif
 648
 649        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 650        if (!empty_zero_page)
 651                panic("zero page allocation failed.\n");
 652
 653}
 654
 655static void __init gateway_init(void)
 656{
 657        unsigned long linux_gateway_page_addr;
 658        /* FIXME: This is 'const' in order to trick the compiler
 659           into not treating it as DP-relative data. */
 660        extern void * const linux_gateway_page;
 661
 662        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
 663
 664        /*
 665         * Setup Linux Gateway page.
 666         *
 667         * The Linux gateway page will reside in kernel space (on virtual
 668         * page 0), so it doesn't need to be aliased into user space.
 669         */
 670
 671        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
 672                  PAGE_SIZE, PAGE_GATEWAY, 1);
 673}
 674
 675static void __init parisc_bootmem_free(void)
 676{
 677        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 678
 679        max_zone_pfn[0] = memblock_end_of_DRAM();
 680
 681        free_area_init(max_zone_pfn);
 682}
 683
 684void __init paging_init(void)
 685{
 686        setup_bootmem();
 687        pagetable_init();
 688        gateway_init();
 689        flush_cache_all_local(); /* start with known state */
 690        flush_tlb_all_local(NULL);
 691
 692        sparse_init();
 693        parisc_bootmem_free();
 694}
 695
 696#ifdef CONFIG_PA20
 697
 698/*
 699 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 700 * limiting factor (space ids are 32 bits).
 701 */
 702
 703#define NR_SPACE_IDS 262144
 704
 705#else
 706
 707/*
 708 * Currently we have a one-to-one relationship between space IDs and
 709 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 710 * support 15 bit protection IDs, so that is the limiting factor.
 711 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 712 * probably not worth the effort for a special case here.
 713 */
 714
 715#define NR_SPACE_IDS 32768
 716
 717#endif  /* !CONFIG_PA20 */
 718
 719#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
 720#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
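/*
 * For example, with CONFIG_PA20 and 64-bit longs: NR_SPACE_IDS == 262144,
 * so each bitmap below is 262144 / 64 == 4096 longs (32 kB), and
 * flush_tlb_all() starts recycling once more than 131072 freed space ids
 * are waiting for a TLB flush.
 */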
 721
 722static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
 723static unsigned long dirty_space_id[SID_ARRAY_SIZE];
 724static unsigned long space_id_index;
 725static unsigned long free_space_ids = NR_SPACE_IDS - 1;
 726static unsigned long dirty_space_ids = 0;
 727
 728static DEFINE_SPINLOCK(sid_lock);
 729
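/*
 * A set bit in space_id[] means that space id is in use.  alloc_sid()
 * hands out ids starting the search at the most recently allocated index;
 * free_sid() only marks the id in dirty_space_id[], and the dirty ids are
 * folded back into the free pool by recycle_sids() after the whole TLB has
 * been flushed, so an id is never reused while stale translations for it
 * might still be cached (see the comment above flush_tlb_all() below).
 */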
 730unsigned long alloc_sid(void)
 731{
 732        unsigned long index;
 733
 734        spin_lock(&sid_lock);
 735
 736        if (free_space_ids == 0) {
 737                if (dirty_space_ids != 0) {
 738                        spin_unlock(&sid_lock);
 739                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
 740                        spin_lock(&sid_lock);
 741                }
 742                BUG_ON(free_space_ids == 0);
 743        }
 744
 745        free_space_ids--;
 746
 747        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
 748        space_id[BIT_WORD(index)] |= BIT_MASK(index);
 749        space_id_index = index;
 750
 751        spin_unlock(&sid_lock);
 752
 753        return index << SPACEID_SHIFT;
 754}
 755
 756void free_sid(unsigned long spaceid)
 757{
 758        unsigned long index = spaceid >> SPACEID_SHIFT;
 759        unsigned long *dirty_space_offset, mask;
 760
 761        dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
 762        mask = BIT_MASK(index);
 763
 764        spin_lock(&sid_lock);
 765
 766        BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
 767
 768        *dirty_space_offset |= mask;
 769        dirty_space_ids++;
 770
 771        spin_unlock(&sid_lock);
 772}
 773
 774
 775#ifdef CONFIG_SMP
  776static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
 777{
 778        int i;
 779
 780        /* NOTE: sid_lock must be held upon entry */
 781
 782        *ndirtyptr = dirty_space_ids;
 783        if (dirty_space_ids != 0) {
 784            for (i = 0; i < SID_ARRAY_SIZE; i++) {
 785                dirty_array[i] = dirty_space_id[i];
 786                dirty_space_id[i] = 0;
 787            }
 788            dirty_space_ids = 0;
 789        }
 790
 791        return;
 792}
 793
  794static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
 795{
 796        int i;
 797
 798        /* NOTE: sid_lock must be held upon entry */
 799
 800        if (ndirty != 0) {
 801                for (i = 0; i < SID_ARRAY_SIZE; i++) {
 802                        space_id[i] ^= dirty_array[i];
 803                }
 804
 805                free_space_ids += ndirty;
 806                space_id_index = 0;
 807        }
 808}
 809
 810#else /* CONFIG_SMP */
 811
 812static void recycle_sids(void)
 813{
 814        int i;
 815
 816        /* NOTE: sid_lock must be held upon entry */
 817
 818        if (dirty_space_ids != 0) {
 819                for (i = 0; i < SID_ARRAY_SIZE; i++) {
 820                        space_id[i] ^= dirty_space_id[i];
 821                        dirty_space_id[i] = 0;
 822                }
 823
 824                free_space_ids += dirty_space_ids;
 825                dirty_space_ids = 0;
 826                space_id_index = 0;
 827        }
 828}
 829#endif
 830
 831/*
 832 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 833 * purged, we can safely reuse the space ids that were released but
 834 * not flushed from the tlb.
 835 */
 836
 837#ifdef CONFIG_SMP
 838
 839static unsigned long recycle_ndirty;
 840static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
 841static unsigned int recycle_inuse;
 842
 843void flush_tlb_all(void)
 844{
 845        int do_recycle;
 846
 847        __inc_irq_stat(irq_tlb_count);
 848        do_recycle = 0;
 849        spin_lock(&sid_lock);
 850        if (dirty_space_ids > RECYCLE_THRESHOLD) {
 851            BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
  852            get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
 853            recycle_inuse++;
 854            do_recycle++;
 855        }
 856        spin_unlock(&sid_lock);
 857        on_each_cpu(flush_tlb_all_local, NULL, 1);
 858        if (do_recycle) {
 859            spin_lock(&sid_lock);
  860            recycle_sids(recycle_ndirty, recycle_dirty_array);
 861            recycle_inuse = 0;
 862            spin_unlock(&sid_lock);
 863        }
 864}
 865#else
 866void flush_tlb_all(void)
 867{
 868        __inc_irq_stat(irq_tlb_count);
 869        spin_lock(&sid_lock);
 870        flush_tlb_all_local(NULL);
 871        recycle_sids();
 872        spin_unlock(&sid_lock);
 873}
 874#endif
 875