/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        raw_spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES - 1 for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}

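/*
 * Check whether two entries describe back-to-back regions: the second
 * mapping must begin exactly where the first ends, both virtually and
 * physically, and must carry identical flags.
 */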
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}

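/*
 * Check whether the requested virtual/physical range is already covered
 * by an existing entry, following link chains for compound mappings.
 */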
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
                               unsigned long size)
{
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe, *iter;
                unsigned long span;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * See if VPN and PPN are bounded by an existing mapping.
                 */
                if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
                        continue;
                if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
                        continue;

                /*
                 * Now see if we're in range of a simple mapping.
                 */
                if (size <= pmbe->size) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }

                span = pmbe->size;

                /*
                 * Finally for sizes that involve compound mappings, walk
                 * the chain.
                 */
                for (iter = pmbe->link; iter; iter = iter->link)
                        span += iter->size;

                /*
                 * Nothing else to do if the range requirements are met.
                 */
                if (size <= span) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }
        }

        read_unlock(&pmb_rwlock);
        return false;
}

static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}

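/*
 * Claim the first free slot in the PMB allocation bitmap. Returns the
 * slot index, or -ENOSPC if every hardware entry is in use.
 */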
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

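/*
 * Allocate and initialize the software descriptor for a PMB entry,
 * either claiming the specific hardware slot given by 'entry' or, for
 * PMB_NO_ENTRY, the first free one. Returns an ERR_PTR() on failure.
 */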
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        raw_spin_lock_init(&pmbe->lock);

        pmbe->vpn       = vpn;
        pmbe->ppn       = ppn;
        pmbe->flags     = flags;
        pmbe->entry     = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}

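/*
 * Return the entry's slot to the allocation bitmap and reset its
 * software state. The hardware entry itself is not touched here.
 */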
static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry     = PMB_NO_ENTRY;
        pmbe->link      = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        jump_to_uncached();

        /* Set V-bit */
        __raw_writel(pmbe->vpn | PMB_V, addr);
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

        back_to_cached();
}

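/*
 * Invalidate a hardware entry by clearing the valid bit in both the
 * address and data arrays, leaving the remaining fields intact.
 */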
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

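/*
 * Establish a bolted (static) mapping of at least 16MB, carving the
 * region into the largest PMB entry sizes that fit and linking the
 * resulting entries together for later tear-down and coalescing.
 */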
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long orig_addr, orig_size;
        unsigned long flags, pmb_flags;
        int i, mapped;

        if (size < SZ_16M)
                return -EINVAL;
        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;
        if (pmb_mapping_exists(vaddr, phys, size))
                return 0;

        orig_addr = vaddr;
        orig_size = size;

        flush_tlb_kernel_range(vaddr, vaddr + size);

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        do {
                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i].size)
                                continue;

                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
                        if (IS_ERR(pmbe)) {
                                pmb_unmap_entry(pmbp, mapped);
                                return PTR_ERR(pmbe);
                        }

                        raw_spin_lock_irqsave(&pmbe->lock, flags);

                        pmbe->size = pmb_sizes[i].size;

                        __set_pmb_entry(pmbe);

                        phys    += pmbe->size;
                        vaddr   += pmbe->size;
                        size    -= pmbe->size;

                        /*
                         * Link adjacent entries that span multiple PMB
                         * entries for easier tear-down.
                         */
                        if (likely(pmbp)) {
                                raw_spin_lock_nested(&pmbp->lock,
                                                     SINGLE_DEPTH_NESTING);
                                pmbp->link = pmbe;
                                raw_spin_unlock(&pmbp->lock);
                        }

                        pmbp = pmbe;

                        /*
                         * Instead of trying smaller sizes on every
                         * iteration (even if we succeed in allocating
                         * space), try using pmb_sizes[i].size again.
                         */
                        i--;
                        mapped++;

                        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
                }
        } while (size >= SZ_16M);

        flush_cache_vmap(orig_addr, orig_addr + orig_size);

        return 0;
}

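/*
 * Set up an I/O mapping through the PMB rather than the page tables.
 * Only active when "pmb=iomap" is given on the command line; requests
 * must be at least 16MB and must not carry user-accessible protection.
 */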
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        unsigned long vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;
        int i, ret;

        if (!pmb_iomapping_enabled)
                return NULL;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        /*
         * XXX: This should really start from uncached_end, but this
         * causes the MMU to reset, so for now we restrict it to the
         * 0xb000...0xc000 range.
         */
        area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        vaddr = (unsigned long)area->addr;

        ret = pmb_bolt_mapping(vaddr, phys, size, prot);
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        return (void __iomem *)(offset + (char *)vaddr);
}

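/*
 * Tear down the mapping whose first entry starts at the given virtual
 * address. Returns 0 on success or -EINVAL if no such mapping exists.
 */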
int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}

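/*
 * Invalidate and free up to 'depth' entries along the link chain that
 * starts at 'pmbe'. Callers are expected to hold pmb_rwlock for writing.
 */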
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                raw_spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * set up the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;
                        raw_spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}

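/*
 * Try to collapse a chain of linked entries into a single larger entry
 * when the combined span matches one of the supported PMB page sizes.
 */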
static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!depth || !pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}

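/*
 * Walk all in-use entries and attempt to merge any compound mappings
 * that are not already using the largest page size.
 */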
static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
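/*
 * Resize the pre-existing uncached mapping down to 16MB and update the
 * uncached region bookkeeping to match.
 */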
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                raw_spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                raw_spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif

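/*
 * Parse the "pmb=" kernel command line option; "pmb=iomap" enables
 * routing of large I/O mappings through the PMB.
 */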
static int __init early_pmb(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "iomap"))
                pmb_iomapping_enabled = 1;

        return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        local_flush_tlb_all();
        ctrl_barrier();
}

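/*
 * Report whether the CPU is running with the PMB space extension
 * disabled, i.e. in legacy 29-bit physical address mode.
 */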
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

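/*
 * debugfs show handler: dump the raw state of every hardware PMB entry,
 * one line per slot.
 */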
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;

        return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
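/*
 * Re-program every software-tracked entry back into the hardware PMB
 * after a system resume.
 */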
static void pmb_syscore_resume(void)
{
        struct pmb_entry *pmbe;
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        set_pmb_entry(pmbe);
                }
        }

        read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
        .resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
        register_syscore_ops(&pmb_syscore_ops);
        return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif