linux/arch/tile/mm/pgtable.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

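/* Convert a count of pages to a size in kilobytes. */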
#define K(x) ((x) << (PAGE_SHIFT-10))

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
        struct list_head *pos;
#endif

        /* Get a pointer to the pmd entry that we need to change. */
        addr &= HPAGE_MASK;
        BUG_ON(pgd_addr_invalid(addr));
        BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
        pgd = swapper_pg_dir + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        BUG_ON(!pud_present(*pud));
        pmd = pmd_offset(pud, addr);
        BUG_ON(!pmd_present(*pmd));
        if (!pmd_huge_page(*pmd))
                return;

        spin_lock_irqsave(&init_mm.page_table_lock, flags);
        if (!pmd_huge_page(*pmd)) {
                /* Lost the race to convert the huge page. */
                spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
                return;
        }

        /* Shatter the huge page into the preallocated L2 page table. */
        pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
        /* Walk every pgd on the system and update the pmd there. */
        spin_lock(&pgd_lock);
        list_for_each(pos, &pgd_list) {
                pmd_t *copy_pmd;
                pgd = list_to_pgd(pos) + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                copy_pmd = pmd_offset(pud, addr);
                __set_pmd(copy_pmd, *pmd);
        }
        spin_unlock(&pgd_lock);
#endif

        /* Tell every cpu to notice the change. */
        flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
                     cpu_possible_mask, NULL, 0);

        /* Hold the lock until the TLB flush is finished to avoid races. */
        spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
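
/*
 * Example (illustrative, not from the original source): a caller that
 * wants to change the protection of a single small kernel page would
 * typically shatter any covering huge mapping first and then edit the
 * now-small PTE, roughly along these lines:
 *
 *	shatter_huge_page((unsigned long)vaddr);
 *	ptep = virt_to_kpte((unsigned long)vaddr);
 *	set_pte(ptep, pte_wrprotect(*ptep));
 *	flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 *
 * The exact follow-up steps depend on the caller; this sketch only shows
 * where shatter_huge_page() fits in such a sequence.
 */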

/*
 * List of all pgd's needed so it can invalidate entries in both cached
 * and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 *
 * The lock is always taken with interrupts disabled, unlike on x86
 * and other platforms, because we need to take the lock in
 * shatter_huge_page(), which may be called from an interrupt context.
 * We are not at risk from the tlbflush IPI deadlock that was seen on
 * x86, since we use the flush_remote() API to have the hypervisor do
 * the TLB flushes regardless of irq disabling.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
        list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

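/*
 * Initialize a newly-allocated pgd: zero the user portion, copy the
 * kernel mappings from swapper_pg_dir, and link the pgd onto pgd_list
 * (under pgd_lock) so that later kernel page-table updates, such as
 * shatter_huge_page(), can find and fix up every pgd in the system.
 */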
static void pgd_ctor(pgd_t *pgd)
{
        unsigned long flags;

        memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
        spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
        /*
         * Check that the user interrupt vector has no L2.
         * It never should for the swapper, and new page tables
         * should always start with an empty user interrupt vector.
         */
        BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

        memcpy(pgd + KERNEL_PGD_INDEX_START,
               swapper_pg_dir + KERNEL_PGD_INDEX_START,
               KERNEL_PGD_PTRS * sizeof(pgd_t));

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
        if (pgd)
                pgd_ctor(pgd);
        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_dtor(pgd);
        kmem_cache_free(pgd_cache, pgd);
}


#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

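/*
 * Allocate a user page table of the given order; for a bottom-level (L2)
 * user page table the order is L2_USER_PGTABLE_ORDER.  Every constituent
 * page is given its own reference count so that tlb_remove_page() can
 * release the pages individually (see the comment in the loop below).
 */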
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
                               int order)
{
        gfp_t flags = GFP_KERNEL|__GFP_ZERO;
        struct page *p;
        int i;

        p = alloc_pages(flags, order);
        if (p == NULL)
                return NULL;

        if (!pgtable_page_ctor(p)) {
                __free_pages(p, order);
                return NULL;
        }

        /*
         * Make every page have a page_count() of one, not just the first.
         * We don't use __GFP_COMP since it doesn't look like it works
         * correctly with tlb_remove_page().
         */
        for (i = 1; i < order; ++i) {
                init_page_count(p+i);
                inc_zone_page_state(p+i, NR_PAGETABLE);
        }

        return p;
}

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
        int i;

        pgtable_page_dtor(p);
        __free_page(p);

        for (i = 1; i < order; ++i) {
                __free_page(p+i);
                dec_zone_page_state(p+i, NR_PAGETABLE);
        }
}

void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
                        unsigned long address, int order)
{
        int i;

        pgtable_page_dtor(pte);
        tlb_remove_page(tlb, pte);

        for (i = 1; i < order; ++i) {
                tlb_remove_page(tlb, pte + i);
                dec_zone_page_state(pte + i, NR_PAGETABLE);
        }
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
        u8 *tmp = (u8 *)ptep;
        u8 second_byte = tmp[1];
        if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
                return 0;
        tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
        return 1;
}
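
/*
 * Illustrative note: the #if above guarantees that the "accessed" bit
 * lives in byte 1 of the PTE, so the routine only ever loads and stores
 * that single byte.  For a hypothetical HV_PTE_INDEX_ACCESSED of 11, the
 * bit tested and cleared would be (1 << (11 - 8)) == 0x8 within tmp[1].
 */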

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
                        unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
        u32 *tmp = (u32 *)ptep;
        tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

/*
 * Return a pointer to the PTE that corresponds to the given
 * address in the given page table.  A NULL page table just uses
 * the standard kernel page table; the preferred API in this case
 * is virt_to_kpte().
 *
 * The returned pointer can point to a huge page in other levels
 * of the page table than the bottom, if the huge page is present
 * in the page table.  For bottom-level PTEs, the returned pointer
 * can point to a PTE that is either present or not.
 */
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_addr_invalid(addr))
                return NULL;

        pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return NULL;
        if (pud_huge_page(*pud))
                return (pte_t *)pud;
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_huge_page(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, addr);
}
EXPORT_SYMBOL(virt_to_pte);

pte_t *virt_to_kpte(unsigned long kaddr)
{
        BUG_ON(kaddr < PAGE_OFFSET);
        return virt_to_pte(NULL, kaddr);
}
EXPORT_SYMBOL(virt_to_kpte);
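
/*
 * Example (illustrative, not from the original source): looking up the
 * kernel PTE for a directly-mapped page might look roughly like this:
 *
 *	pte_t *ptep = virt_to_kpte((unsigned long)page_address(page));
 *	if (ptep && pte_present(*ptep))
 *		pr_info("mapped at pfn %#lx\n", pte_pfn(*ptep));
 *
 * Remember that the returned pointer may actually reference a PUD- or
 * PMD-level huge-page entry, as described above virt_to_pte().
 */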

pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
        unsigned int width = smp_width;
        int x = cpu % width;
        int y = cpu / width;
        BUG_ON(y >= smp_height);
        BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
        BUG_ON(cpu < 0 || cpu >= NR_CPUS);
        BUG_ON(!cpu_is_valid_lotar(cpu));
        return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

int get_remote_cache_cpu(pgprot_t prot)
{
        HV_LOTAR lotar = hv_pte_get_lotar(prot);
        int x = HV_LOTAR_X(lotar);
        int y = HV_LOTAR_Y(lotar);
        BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
        return x + y * smp_width;
}
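
/*
 * Illustrative note: the cpu number and the (x, y) lotar coordinates are
 * related by a simple row-major mapping over the mesh.  For example, with
 * a hypothetical smp_width of 8, cpu 19 maps to x = 19 % 8 = 3 and
 * y = 19 / 8 = 2, and get_remote_cache_cpu() inverts this as 3 + 2 * 8.
 */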

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
        struct page *page = virt_to_page(va);
        pte_t null_pte = { 0 };

        *cpa = __pa(va);

        /* Note that this is not writing a page table, just returning a pte. */
        *pte = pte_set_home(null_pte, page_home(page));

        return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);

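/*
 * Install a PTE value into the page table.  On tilegx this is a single
 * 64-bit store; on 32-bit chips the two words are stored separately,
 * ordered so that the word holding the "present" and "migrating" bits is
 * written last when making a PTE valid (and first when clearing one), so
 * a concurrent walker never sees those bits set alongside a stale half.
 */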
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
        *ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
#  error Must write the present and migrating bits last
# endif
        if (pte_present(pte)) {
                ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
                barrier();
                ((u32 *)ptep)[0] = (u32)(pte_val(pte));
        } else {
                ((u32 *)ptep)[0] = (u32)(pte_val(pte));
                barrier();
                ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
        }
#endif /* __tilegx__ */
}

void set_pte(pte_t *ptep, pte_t pte)
{
        if (pte_present(pte) &&
            (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
                /* The PTE actually references physical memory. */
                unsigned long pfn = pte_pfn(pte);
                if (pfn_valid(pfn)) {
                        /* Update the home of the PTE from the struct page. */
                        pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
                } else if (hv_pte_get_mode(pte) == 0) {
                        /* remap_pfn_range(), etc, must supply PTE mode. */
                        panic("set_pte(): out-of-range PFN and mode 0\n");
                }
        }

        __set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
        return mm->context.priority_cached != 0;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
        if (!mm_is_priority_cached(mm)) {
                mm->context.priority_cached = -1UL;
                hv_set_caching(-1UL);
        }
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned long update_priority_cached(struct mm_struct *mm)
{
        if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
                struct vm_area_struct *vm;
                for (vm = mm->mmap; vm; vm = vm->vm_next) {
                        if (hv_pte_get_cached_priority(vm->vm_page_prot))
                                break;
                }
                if (vm == NULL)
                        mm->context.priority_cached = 0;
                up_write(&mm->mmap_sem);
        }
        return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
        if (!mm_is_priority_cached(next)) {
                /*
                 * If the new mm doesn't use priority caching, just see if we
                 * need the hv_set_caching(), or can assume it's already zero.
                 */
                if (mm_is_priority_cached(prev))
                        hv_set_caching(0);
        } else {
                hv_set_caching(update_priority_cached(next));
        }
}
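
/*
 * Taken together, the routines above implement the priority-caching
 * lifecycle: start_mm_caching() marks an mm (and tells the hypervisor)
 * the first time a priority mapping is added, update_priority_cached()
 * lazily clears the mark once no VMA requests cached priority any more,
 * and check_mm_caching() applies the appropriate hv_set_caching() value
 * when switching to a new mm.
 */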

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                           pgprot_t home)
{
        void *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t pgprot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /* Create a read/write, MMIO VA mapping homed at the requested shim. */
        pgprot = PAGE_KERNEL;
        pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
        pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
                free_vm_area(area);
                return NULL;
        }
        return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
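
/*
 * Example (illustrative, not from the original source): a driver mapping
 * a device's register window through ioremap_prot() might look roughly
 * like the following, where dev_pa, dev_size, home_prot and REG_STATUS
 * are placeholders supplied by the caller:
 *
 *	void __iomem *regs = ioremap_prot(dev_pa, dev_size, home_prot);
 *	if (regs) {
 *		u32 status = readl(regs + REG_STATUS);
 *		...
 *		iounmap(regs);
 *	}
 */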

#if !defined(CONFIG_PCI) || !defined(CONFIG_TILEGX)
/* ioremap is conditionally declared in pci_gx.c */

void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
        return NULL;
}
EXPORT_SYMBOL(ioremap);

#endif

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
        volatile void __iomem *addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr_in);
#if 1
        vunmap((void * __force)addr);
#else
        /* x86 uses this complicated flow instead of vunmap().  Is
         * there any particular reason we should do the same? */
        struct vm_struct *p, *o;

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        p = find_vm_area((void *)addr);

        if (!p) {
                pr_err("iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */