linux/arch/mips/mm/init.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because it is used by IP27's special magic
 * initialization code.
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

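/*
 * Map a page into a fixmap slot whose virtual colour matches the user
 * address it is mapped at, using a wired TLB entry so the mapping
 * survives until kunmap_coherent() removes it.  Interrupt context gets
 * its own bank of colour slots so it cannot clobber a mapping that was
 * set up in process context.
 */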
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned int old_mmid;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        preempt_disable();
        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
        entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
        if (cpu_has_mmid) {
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(MMID_KERNEL_WIRED);
        }
#ifdef CONFIG_XPA
        if (cpu_has_xpa) {
                entrylo = (pte.pte_low & _PFNX_MASK);
                writex_c0_entrylo0(entrylo);
                writex_c0_entrylo1(entrylo);
        }
#endif
        tlbidx = num_wired_entries();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        if (cpu_has_mmid)
                write_c0_memorymapid(old_mmid);
        local_irq_restore(flags);

        return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

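/*
 * Tear down the wired TLB entry installed by __kmap_pgprot() and
 * re-enable pagefaults and preemption, undoing kmap_coherent() or
 * kmap_noncoherent().
 */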
void kunmap_coherent(void)
{
        unsigned int wired;
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        wired = num_wired_entries() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
        preempt_enable();
}

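/*
 * Copy a user page.  When the D-cache is aliasing and the source page
 * is mapped clean, read it through a coherent mapping of the matching
 * colour; flush the destination's kernel mapping from the D-cache
 * afterwards unless the I-cache fills from the D-cache and the kernel
 * and user addresses cannot alias.
 */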
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapcount(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

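/*
 * Write into a page on behalf of, e.g., ptrace.  Use a coherent
 * mapping of the matching colour when the page is mapped and clean;
 * otherwise write through the kernel address and remember that the
 * D-cache is dirty.  Executable mappings additionally need the caches
 * flushed so the new instructions become visible.
 */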
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

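/*
 * Read from a page on behalf of, e.g., ptrace.  Use a coherent mapping
 * of the matching colour when aliasing could otherwise hand us stale
 * data.
 */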
void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

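/*
 * Pre-allocate the page tables covering the fixmap virtual range so
 * later fixmap users never need to allocate memory.  The body is a
 * no-op unless CONFIG_HIGHMEM is enabled.
 */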
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pud_index(vaddr);
        k = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
                                                                           PAGE_SIZE);
                                        if (!pte)
                                                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                                                      __func__, PAGE_SIZE,
                                                      PAGE_SIZE);

                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

struct maar_walk_info {
        struct maar_config cfg[16];
        unsigned int num_cfg;
};

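/*
 * walk_system_ram_range() callback: record one MAAR config entry per
 * contiguous block of system RAM, shrunk inward to the MAAR address
 * granularity and marked as speculatable.
 */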
static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
                         void *data)
{
        struct maar_walk_info *wi = data;
        struct maar_config *cfg = &wi->cfg[wi->num_cfg];
        unsigned int maar_align;

        /* MAAR registers hold physical addresses right shifted by 4 bits */
        maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

        /* Fill in the MAAR config entry */
        cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
        cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
        cfg->attrs = MIPS_MAAR_S;

        /* Ensure we don't overflow the cfg array */
        if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
                wi->num_cfg++;

        return 0;
}

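/*
 * Default MAAR setup: allow speculation for every block of system RAM.
 * Platforms with special requirements can override this weak function.
 */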
unsigned __weak platform_maar_init(unsigned num_pairs)
{
        unsigned int num_configured;
        struct maar_walk_info wi;

        wi.num_cfg = 0;
        walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

        num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
        if (num_configured < wi.num_cfg)
                pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
                        num_pairs, wi.num_cfg);

        return num_configured;
}

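/*
 * Probe the number of MAAR pairs, program them (reusing the
 * configuration recorded by the boot CPU when called on a secondary),
 * disable the remainder, and log the resulting layout once.
 */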
void maar_init(void)
{
        unsigned num_maars, used, i;
        phys_addr_t lower, upper, attr;
        static struct {
                struct maar_config cfgs[3];
                unsigned used;
        } recorded = { { { 0 } }, 0 };

        if (!cpu_has_maar)
                return;

        /* Detect the number of MAARs */
        write_c0_maari(~0);
        back_to_back_c0_hazard();
        num_maars = read_c0_maari() + 1;

        /* MAARs should be in pairs */
        WARN_ON(num_maars % 2);

        /* Set MAARs using values we recorded already */
        if (recorded.used) {
                used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
                BUG_ON(used != recorded.used);
        } else {
                /* Configure the required MAARs */
                used = platform_maar_init(num_maars / 2);
        }

        /* Disable any further MAARs */
        for (i = (used * 2); i < num_maars; i++) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                write_c0_maar(0);
                back_to_back_c0_hazard();
        }

        if (recorded.used)
                return;

        pr_info("MAAR configuration:\n");
        for (i = 0; i < num_maars; i += 2) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                upper = read_c0_maar();
#ifdef CONFIG_XPA
                upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

                write_c0_maari(i + 1);
                back_to_back_c0_hazard();
                lower = read_c0_maar();
#ifdef CONFIG_XPA
                lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

                attr = lower & upper;
                lower = (lower & MIPS_MAAR_ADDR) << 4;
                upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

                pr_info("  [%d]: ", i / 2);
                if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
                        pr_cont("disabled\n");
                        continue;
                }

                pr_cont("%pa-%pa", &lower, &upper);

                if (attr & MIPS_MAAR_S)
                        pr_cont(" speculate");

                pr_cont("\n");

                /* Record the setup for use on secondary CPUs */
                if (used <= ARRAY_SIZE(recorded.cfgs)) {
                        recorded.cfgs[recorded.used].lower = lower;
                        recorded.cfgs[recorded.used].upper = upper;
                        recorded.cfgs[recorded.used].attrs = attr;
                        recorded.used++;
                }
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
        }
#endif

        free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

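/*
 * Hand highmem pages to the buddy allocator.  Nothing is freed when
 * the D-cache is aliasing, since paging_init() above already ignores
 * highmem on such CPUs.
 */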
static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        if (cpu_has_dc_aliases)
                return;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!memblock_is_memory(PFN_PHYS(tmp)))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}

void __init mem_init(void)
{
        /*
         * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
         * bits to hold a full 32b physical address on MIPS32 systems.
         */
        BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        maar_init();
        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages.  */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /*
                 * The -4 is a hack so that user tools don't have to
                 * handle the overflow.
                 */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

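/*
 * Poison and release a physical range of init memory back to the
 * buddy allocator, so that stale uses of it are caught.
 */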
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
        prom_free_prom_memory();
        /*
         * Let the platform define a specific function to free the
         * init section since EVA may have used any possible mapping
         * between virtual and physical addresses.
         */
        if (free_init_pages_eva)
                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
        else
                free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
        return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
{
        return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
                                      MEMBLOCK_ALLOC_ACCESSIBLE,
                                      cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        memblock_free_early(__pa(ptr), size);
}

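/*
 * Set up the first-chunk percpu areas with node-aware placement,
 * using the helpers above for CPU distance, allocation and freeing,
 * then record each CPU's offset from the generic percpu base.
 */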
void __init setup_per_cpu_areas(void)
{
        unsigned long delta;
        unsigned int cpu;
        int rc;

        /*
         * Always reserve area for module percpu variables.  That's
         * what the legacy allocator did.
         */
        rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
                                    pcpu_cpu_distance,
                                    pcpu_fc_alloc, pcpu_fc_free);
        if (rc < 0)
                panic("Failed to initialize percpu areas.");

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);