linux/arch/mips/kernel/setup.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

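/*
 * add_memory_region - record a physical memory range in boot_mem_map
 * @start: physical start address of the region
 * @size: size of the region in bytes
 * @type: BOOT_MEM_* type of the region
 *
 * A region reaching the top of the physical address space is trimmed by
 * one byte so that start + size cannot overflow, and a region that
 * overlaps an existing entry of the same type is merged into it instead
 * of consuming another map slot.
 */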
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
        int x = boot_mem_map.nr_map;
        int i;

        /*
         * If the region reaches the top of the physical address space, adjust
         * the size slightly so that (start + size) doesn't overflow
         */
        if (start + size - 1 == PHYS_ADDR_MAX)
                --size;

        /* Sanity check */
        if (start + size < start) {
                pr_warn("Trying to add an invalid memory region, skipped\n");
                return;
        }

        /*
         * Try to merge with existing entry, if any.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct boot_mem_map_entry *entry = boot_mem_map.map + i;
                unsigned long top;

                if (entry->type != type)
                        continue;

                if (start + size < entry->addr)
                        continue;                       /* no overlap */

                if (entry->addr + entry->size < start)
                        continue;                       /* no overlap */

                top = max(entry->addr + entry->size, start + size);
                entry->addr = min(entry->addr, start);
                entry->size = top - entry->addr;

                return;
        }

        if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
                pr_err("Ooops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}

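/*
 * detect_memory_region - probe how much RAM is present at @start
 *
 * detect_magic lives in the kernel image, so on systems that only
 * partially decode physical addresses its bytes appear again at
 * dm + size once size reaches the real amount of RAM.  The probe
 * doubles size from @sz_min until that alias is seen (or @sz_max is
 * reached) and registers the result with add_memory_region().
 */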
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
        void *dm = &detect_magic;
        phys_addr_t size;

        for (size = sz_min; size < sz_max; size <<= 1) {
                if (!memcmp(dm, dm + size, sizeof(detect_magic)))
                        break;
        }

        pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
                ((unsigned long long) size) / SZ_1M,
                (unsigned long long) start,
                ((unsigned long long) sz_min) / SZ_1M,
                ((unsigned long long) sz_max) / SZ_1M);

        add_memory_region(start, size, BOOT_MEM_RAM);
}

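/*
 * memory_region_available - check that [start, start + size) is usable
 *
 * The range qualifies only if it lies entirely within a BOOT_MEM_RAM
 * entry of boot_mem_map and does not overlap any reserved or nomap
 * entry.
 */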
static bool __init __maybe_unused memory_region_available(phys_addr_t start,
                                                          phys_addr_t size)
{
        int i;
        bool in_ram = false, free = true;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                phys_addr_t start_, end_;

                start_ = boot_mem_map.map[i].addr;
                end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        if (start >= start_ && start + size <= end_)
                                in_ram = true;
                        break;
                case BOOT_MEM_RESERVED:
                case BOOT_MEM_NOMAP:
                        if ((start >= start_ && start < end_) ||
                            (start < start_ && start + size >= start_))
                                free = false;
                        break;
                default:
                        continue;
                }
        }

        return in_ram && free;
}

static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk(KERN_CONT "(usable)\n");
                        break;
                case BOOT_MEM_INIT_RAM:
                        printk(KERN_CONT "(usable after init)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk(KERN_CONT "(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
                case BOOT_MEM_NOMAP:
                        printk(KERN_CONT "(nomap)\n");
                        break;
                default:
                        printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
        unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
        /* Guess if the sign extension was forgotten by bootloader */
        if (start < XKPHYS)
                start = (int)start;
#endif
        initrd_start = start;
        initrd_end += start;
        return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
        initrd_end += memparse(p, &p);
        return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free PFN after the initrd. */
static unsigned long __init init_initrd(void)
{
        unsigned long end;

        /*
         * Board specific code or the command line parser should have
         * already set up initrd_start and initrd_end. In these cases
         * perform sanity checks and use them if all looks good.
         */
        if (!initrd_start || initrd_end <= initrd_start)
                goto disable;

        if (initrd_start & ~PAGE_MASK) {
                pr_err("initrd start must be page aligned\n");
                goto disable;
        }
        if (initrd_start < PAGE_OFFSET) {
                pr_err("initrd start < PAGE_OFFSET\n");
                goto disable;
        }

        /*
         * Sanitize the initrd addresses. For example, firmware can't
         * know whether it needs to pass them as 64-bit values when the
         * kernel has been built purely 32-bit. We also need to switch
         * from KSEG0 to XKPHYS addresses now, so that the code can
         * safely use __pa().
         */
        end = __pa(initrd_end);
        initrd_end = (unsigned long)__va(end);
        initrd_start = (unsigned long)__va(__pa(initrd_start));

        ROOT_DEV = Root_RAM0;
        return PFN_UP(end);
disable:
        initrd_start = 0;
        initrd_end = 0;
        return 0;
}

/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped.  Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
        u64 buf;

        /* Check for CPIO signature */
        if (!memcmp((void *)initrd_start, "070701", 6))
                return;

        /* Check for compressed initrd */
        if (decompress_method((unsigned char *)initrd_start, 8, NULL))
                return;

        /* Try again with a byte swapped header */
        buf = swab64p((u64 *)initrd_start);
        if (!memcmp(&buf, "070701", 6) ||
            decompress_method((unsigned char *)(&buf), 8, NULL)) {
                unsigned long i;

                pr_info("Byteswapped initrd detected\n");
                for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
                        swab64s((u64 *)i);
        }
#endif
}

static void __init finalize_initrd(void)
{
        unsigned long size = initrd_end - initrd_start;

        if (size == 0) {
                printk(KERN_INFO "Initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                printk(KERN_ERR "Initrd extends beyond end of memory");
                goto disable;
        }

        maybe_bswap_initrd();

        memblock_reserve(__pa(initrd_start), size);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
                initrd_start, size);
        return;
disable:
        printk(KERN_CONT " - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
        return 0;
}

#define finalize_initrd()       do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

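/*
 * These NUMA platforms register their memory per node in platform
 * code, so the generic path here only has to deal with the initrd.
 */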
static void __init bootmem_init(void)
{
        init_initrd();
        finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
        phys_addr_t ramstart = PHYS_ADDR_MAX;
        int i;

        /*
         * Sanity check any INITRD first. We don't take it into account
         * for bootmem setup initially, rely on the end-of-kernel-code
         * as our memory range starting point. Once bootmem is inited we
         * will reserve the area used for the initrd.
         */
        init_initrd();

        /* Reserve memory occupied by kernel. */
        memblock_reserve(__pa_symbol(&_text),
                        __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * max_low_pfn is not a number of pages. The number of pages
         * of the system is given by 'max_low_pfn - min_low_pfn'.
         */
        min_low_pfn = ~0UL;
        max_low_pfn = 0;

        /* Find the highest and lowest page frame numbers we have available. */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                ramstart = min(ramstart, boot_mem_map.map[i].addr);

#ifndef CONFIG_HIGHMEM
                /*
                 * Skip highmem here so we get an accurate max_low_pfn if low
                 * memory stops short of high memory.
                 * If the region overlaps HIGHMEM_START, end is clipped so
                 * max_pfn excludes the highmem portion.
                 */
                if (start >= PFN_DOWN(HIGHMEM_START))
                        continue;
                if (end > PFN_DOWN(HIGHMEM_START))
                        end = PFN_DOWN(HIGHMEM_START);
#endif

                if (end > max_low_pfn)
                        max_low_pfn = end;
                if (start < min_low_pfn)
                        min_low_pfn = start;
        }

        if (min_low_pfn >= max_low_pfn)
                panic("Incorrect memory mapping !!!");

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
        ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
        /*
         * Reserve any memory between the start of RAM and PHYS_OFFSET
         */
        if (ramstart > PHYS_OFFSET) {
                add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
                                  BOOT_MEM_RESERVED);
                memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
        }

        if (min_low_pfn > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
                        (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
                        min_low_pfn - ARCH_PFN_OFFSET);
        } else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
                pr_info("%lu free pages won't be used\n",
                        ARCH_PFN_OFFSET - min_low_pfn);
        }
        min_low_pfn = ARCH_PFN_OFFSET;
#endif

        /*
         * Determine low and high memory ranges
         */
        max_pfn = max_low_pfn;
        if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
                highstart_pfn = PFN_DOWN(HIGHMEM_START);
                highend_pfn = max_low_pfn;
#endif
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }

        /* Install all valid RAM ranges to the memblock memory region */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                                + boot_mem_map.map[i].size);

                if (start < min_low_pfn)
                        start = min_low_pfn;
#ifndef CONFIG_HIGHMEM
                /* Ignore highmem regions if highmem is unsupported */
                if (end > max_low_pfn)
                        end = max_low_pfn;
#endif
                if (end <= start)
                        continue;

                memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);

                /* Reserve any memory except the ordinary RAM ranges. */
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        break;
                case BOOT_MEM_NOMAP: /* Discard the range from the system. */
                        memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start));
                        continue;
                default: /* Reserve the rest of the memory types at boot time */
                        memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
                        break;
                }

                /*
                 * In any case the memory regions added to memblock
                 * (highmem/lowmem, available/reserved, etc) are
                 * considered present, so inform sparsemem about them.
                 */
                memory_present(0, start, end);
        }

        /*
         * Reserve initrd memory if needed.
         */
        finalize_initrd();
}

#endif  /* CONFIG_SGI_IP27 */

static int usermem __initdata;

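/*
 * Handle the "mem=size[KMG][@start[KMG]]" parameter.  The first use
 * discards the firmware-provided memory map, so e.g. "mem=64M@0" makes
 * the kernel use exactly 64 MiB of RAM starting at physical address 0.
 */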
static int __init early_parse_mem(char *p)
{
        phys_addr_t start, size;

        /*
         * If a user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                boot_mem_map.nr_map = 0;
                usermem = 1;
        }
        start = 0;
        size = memparse(p, &p);
        if (*p == '@')
                start = memparse(p + 1, &p);

        add_memory_region(start, size, BOOT_MEM_RAM);

        return 0;
}
early_param("mem", early_parse_mem);

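/*
 * Handle the "memmap=" parameter.  Two of the x86 forms are supported:
 * "memmap=nn[KMG]@ss[KMG]" adds nn bytes of RAM at address ss, and
 * "memmap=nn[KMG]$ss[KMG]" reserves such a range.  The "exactmap" and
 * ACPI-data ('#') forms are rejected as meaningless on MIPS.
 */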
static int __init early_parse_memmap(char *p)
{
        char *oldp;
        u64 start_at, mem_size;

        if (!p)
                return -EINVAL;

        if (!strncmp(p, "exactmap", 8)) {
                pr_err("\"memmap=exactmap\" invalid on MIPS\n");
                return 0;
        }

        oldp = p;
        mem_size = memparse(p, &p);
        if (p == oldp)
                return -EINVAL;

        if (*p == '@') {
                start_at = memparse(p+1, &p);
                add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
        } else if (*p == '#') {
                pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
                return -EINVAL;
        } else if (*p == '$') {
                start_at = memparse(p+1, &p);
                add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
        } else {
                pr_err("\"memmap\" invalid format!\n");
                return -EINVAL;
        }

        if (*p == '\0') {
                usermem = 1;
                return 0;
        } else
                return -EINVAL;
}
early_param("memmap", early_parse_memmap);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
        int i;

        setup_elfcorehdr = memparse(p, &p);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start = boot_mem_map.map[i].addr;
                unsigned long end = (boot_mem_map.map[i].addr +
                                     boot_mem_map.map[i].size);
                if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                        /*
                         * Reserve from the elf core header to the end of
                         * the memory segment, that should all be kdump
                         * reserved memory.
                         */
                        setup_elfcorehdr_size = end - setup_elfcorehdr;
                        break;
                }
        }
        /*
         * If we don't find it in the memory map, then we shouldn't
         * have to worry about it, as the new kernel won't use it.
         */
        return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

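/*
 * Add the [mem, end) range to boot_mem_map as @type, unless its start
 * already falls inside an existing entry (e.g. because the firmware
 * reported that RAM itself).
 */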
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
        phys_addr_t size;
        int i;

        size = end - mem;
        if (!size)
                return;

        /* Make sure it is in the boot_mem_map */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                if (mem >= boot_mem_map.map[i].addr &&
                    mem < (boot_mem_map.map[i].addr +
                           boot_mem_map.map[i].size))
                        return;
        }
        add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

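/*
 * Parse the "crashkernel=size[KMG][@offset[KMG]]" parameter and, if
 * the requested range lies in free RAM, record it in crashk_res so
 * that arch_mem_init() can reserve it for a kdump capture kernel.
 */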
static void __init mips_parse_crashkernel(void)
{
        unsigned long long total_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0)
                return;

        if (!memory_region_available(crash_base, crash_size)) {
                pr_warn("Invalid memory region reserved for crash kernel\n");
                return;
        }

        crashk_res.start = crash_base;
        crashk_res.end   = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
        int ret;

        if (crashk_res.start == crashk_res.end)
                return;

        ret = request_resource(res, &crashk_res);
        if (!ret)
                pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
                        (unsigned long)((crashk_res.end -
                                         crashk_res.start + 1) >> 20),
                        (unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC)         */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC)  */

#define USE_PROM_CMDLINE        IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE         IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM        IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM        \
        IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
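
/*
 * How the pieces above combine in arch_mem_init(): unless
 * CONFIG_CMDLINE_OVERRIDE forces the built-in string, boot_command_line
 * starts out with whatever the early DTB scan put there, is replaced by
 * the bootloader's arcs_cmdline when that source is selected (or when a
 * DTB command line was expected but turned out empty), and may then be
 * extended with the bootloader and/or built-in strings.
 */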

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *       This was rather impractical because it meant plat_mem_setup had to
 * get by without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was renamed to plat_mem_setup and a second platform
 * initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
        extern void plat_mem_setup(void);

        /*
         * Initialize boot_command_line to an innocuous but non-empty string in
         * order to prevent early_init_dt_scan_chosen() from copying
         * CONFIG_CMDLINE into it without our knowledge. We handle
         * CONFIG_CMDLINE ourselves below & don't want to duplicate its
         * content because repeating arguments can be problematic.
         */
        strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);

        /* call board setup routine */
        plat_mem_setup();
        memblock_set_bottom_up(true);

        /*
         * Make sure all kernel memory is in the maps.  PFN_UP and
         * PFN_DOWN are swapped for the init section so that it is
         * rounded inward: if initdata shares a page with another
         * section, that page must not be freed along with the initdata.
         */
        arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
                         PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
                         BOOT_MEM_RAM);
        arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
                         PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
                         BOOT_MEM_INIT_RAM);
        arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT,
                         PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT,
                         BOOT_MEM_RAM);

        pr_info("Determined physical RAM map:\n");
        print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
            (USE_DTB_CMDLINE && !boot_command_line[0]))
                strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

        if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }

#if defined(CONFIG_CMDLINE_BOOL)
        if (builtin_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }

        if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
                if (boot_command_line[0])
                        strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
                strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_early_param();

        if (usermem) {
                pr_info("User-defined physical RAM map:\n");
                print_memory_map();
        }

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        bootmem_init();

        /*
         * Prevent memblock from allocating high memory.
         * This cannot be done before max_low_pfn is detected, so up to
         * this point it is only possible to reserve physical memory
         * with memblock_reserve; memblock_alloc* can be used only
         * after this point.
         */
        memblock_set_current_limit(PFN_PHYS(max_low_pfn));

#ifdef CONFIG_PROC_VMCORE
        if (setup_elfcorehdr && setup_elfcorehdr_size) {
                printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
                       setup_elfcorehdr, setup_elfcorehdr_size);
                memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
        }
#endif

        mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                memblock_reserve(crashk_res.start,
                                 crashk_res.end - crashk_res.start + 1);
#endif
        device_tree_init();
        sparse_init();
        plat_swiotlb_setup();

        dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

        /* Reserve for hibernation. */
        memblock_reserve(__pa_symbol(&__nosave_begin),
                __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

        fdt_init_reserved_mem();

        memblock_dump_all();

        early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn));
}

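/*
 * Register the kernel's code/data/bss sections and every low-memory
 * region of boot_mem_map in the iomem resource tree, which is what
 * /proc/iomem reports.  Ranges at or above HIGHMEM_START are skipped
 * or clipped.
 */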
static void __init resource_init(void)
{
        int i;

        if (UNCAC_BASE != IO_BASE)
                return;

        code_resource.start = __pa_symbol(&_text);
        code_resource.end = __pa_symbol(&_etext) - 1;
        data_resource.start = __pa_symbol(&_etext);
        data_resource.end = __pa_symbol(&_edata) - 1;
        bss_resource.start = __pa_symbol(&__bss_start);
        bss_resource.end = __pa_symbol(&__bss_stop) - 1;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= HIGHMEM_START)
                        continue;
                if (end >= HIGHMEM_START)
                        end = HIGHMEM_START - 1;

                res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
                if (!res)
                        panic("%s: Failed to allocate %zu bytes\n", __func__,
                              sizeof(struct resource));

                res->start = start;
                res->end = end;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        res->flags |= IORESOURCE_SYSRAM;
                        break;
                case BOOT_MEM_RESERVED:
                case BOOT_MEM_NOMAP:
                default:
                        res->name = "reserved";
                }

                request_resource(&iomem_resource, res);

                /*
                 *  We don't know which RAM region contains kernel data,
                 *  so we try it repeatedly and let the resource manager
                 *  test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
                request_resource(res, &bss_resource);
                request_crashkernel(res);
        }
}

#ifdef CONFIG_SMP
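/*
 * Mark CPUs 0..possible-1 (whatever plat_smp_setup() discovered,
 * clamped to nr_cpu_ids) as possible and every remaining id as not
 * possible, then shrink nr_cpu_ids to match.
 */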
static void __init prefill_possible_map(void)
{
        int i, possible = num_possible_cpus();

        if (possible > nr_cpu_ids)
                possible = nr_cpu_ids;

        for (i = 0; i < possible; i++)
                set_cpu_possible(i, true);
        for (; i < NR_CPUS; i++)
                set_cpu_possible(i, false);

        nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

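/*
 * setup_arch - architecture setup entry point, called early from
 * start_kernel().  Probes the CPU, runs the platform's prom_init(),
 * sets up early consoles, builds the memory map and command line via
 * arch_mem_init(), registers resources, prepares SMP, and finally
 * initializes caches and paging.
 */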
void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        mips_cm_probe();
        prom_init();

        setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
        setup_early_printk();
#endif
        cpu_report();
        check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        arch_mem_init(cmdline_p);

        resource_init();
        plat_smp_setup();
        prefill_possible_map();

        cpu_cache_init();
        paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
        mips_debugfs_dir = debugfs_create_dir("mips", NULL);
        return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_MAYBE_COHERENT
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;  /* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
        coherentio = IO_COHERENCE_ENABLED;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = IO_COHERENCE_DISABLED;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif