linux/arch/powerpc/mm/subpage-prot.c
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
        struct subpage_prot_table *spt = &mm->context.spt;
        unsigned long i, j, addr;
        u32 **p;

        for (i = 0; i < 4; ++i) {
                if (spt->low_prot[i]) {
                        free_page((unsigned long)spt->low_prot[i]);
                        spt->low_prot[i] = NULL;
                }
        }
        addr = 0;
        for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
                p = spt->protptrs[i];
                if (!p)
                        continue;
                spt->protptrs[i] = NULL;
                for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
                     ++j, addr += PAGE_SIZE)
                        if (p[j])
                                free_page((unsigned long)p[j]);
                free_page((unsigned long)p);
        }
        spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
        struct subpage_prot_table *spt = &mm->context.spt;

        memset(spt, 0, sizeof(*spt));
}

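/*
 * Walk the Linux page tables for [addr, addr + npages * PAGE_SIZE) and
 * do a no-op pte_update() on each PTE; this flushes any existing HPTE
 * for the page so that updated subpage protections are applied on the
 * next hash fault.
 */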
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
                             int npages)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return;
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        for (; npages > 0; --npages) {
                pte_update(mm, addr, pte, 0, 0, 0);
                addr += PAGE_SIZE;
                ++pte;
        }
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 **spm, *spp;
        unsigned long i;
        size_t nw;
        unsigned long next, limit;

        down_write(&mm->mmap_sem);
        limit = addr + len;
        if (limit > spt->maxaddr)
                limit = spt->maxaddr;
        for (; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
                        if (!spm)
                                continue;
                }
                spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
                if (!spp)
                        continue;
                spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

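                /*
                 * i is the index of the first page within this pmd;
                 * nw is the number of protection words to clear before
                 * the next pmd boundary (or the end of the range).
                 */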
                i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
                nw = PTRS_PER_PTE - i;
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;

                memset(spp, 0, nw * sizeof(u32));

                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
        }
        up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
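/*
 * pmd_entry callback for walk_page_vma(): split any transparent huge
 * page mapping this range so that it is backed by normal PTEs, which
 * is what the subpage protection code operates on.
 */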
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        split_huge_pmd(vma, pmd, addr);
        return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
                                    unsigned long len)
{
        struct vm_area_struct *vma;
        struct mm_walk subpage_proto_walk = {
                .mm = mm,
                .pmd_entry = subpage_walk_pmd_entry,
        };

        /*
         * We don't try too hard, we just mark all the VMAs in that range
         * VM_NOHUGEPAGE and split them.
         */
        vma = find_vma(mm, addr);
        /*
         * If the whole range lies in an unmapped hole, there is nothing
         * to mark; just return.
         */
        if (vma && ((addr + len) <= vma->vm_start))
                return;

        while (vma) {
                if (vma->vm_start >= (addr + len))
                        break;
                vma->vm_flags |= VM_NOHUGEPAGE;
                walk_page_vma(vma, &subpage_proto_walk);
                vma = vma->vm_next;
        }
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
                                    unsigned long len)
{
        return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
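/*
 * Illustrative use from user space (not part of the original file):
 * assuming a 64k base page size and the __NR_subpage_prot number from
 * the powerpc uapi headers, setting every 2-bit field to 1 (0x55555555)
 * write-protects all sixteen 4k subpages of one 64k page:
 *
 *      u32 map = 0x55555555;
 *      syscall(__NR_subpage_prot, addr, 0x10000UL, &map);
 *
 * where addr is a 64k-aligned user address.  Passing map == NULL
 * instead clears any subpage protections for the range.
 */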
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                unsigned long, len, u32 __user *, map)
{
        struct mm_struct *mm = current->mm;
        struct subpage_prot_table *spt = &mm->context.spt;
        u32 **spm, *spp;
        unsigned long i;
        size_t nw;
        unsigned long next, limit;
        int err;

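        /* Subpage protection is only implemented for the hash MMU. */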
        if (radix_enabled())
                return -ENOENT;

        /* Check parameters */
        if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
            addr >= mm->task_size || len >= mm->task_size ||
            addr + len > mm->task_size)
                return -EINVAL;

        if (is_hugepage_only_range(mm, addr, len))
                return -EINVAL;

        if (!map) {
                /* Clear out the protection map for the address range */
                subpage_prot_clear(addr, len);
                return 0;
        }

        if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
                return -EFAULT;

        down_write(&mm->mmap_sem);
        subpage_mark_vma_nohuge(mm, addr, len);
        for (limit = addr + len; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                err = -ENOMEM;
                if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
                        if (!spm) {
                                spm = (u32 **)get_zeroed_page(GFP_KERNEL);
                                if (!spm)
                                        goto out;
                                spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
                        }
                }
                spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
                spp = *spm;
                if (!spp) {
                        spp = (u32 *)get_zeroed_page(GFP_KERNEL);
                        if (!spp)
                                goto out;
                        *spm = spp;
                }
                spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

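                /*
                 * Force the segment containing this address down to 4k
                 * hash page size so that the 4k subpage permissions can
                 * actually be enforced in the hash page table.
                 */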
                local_irq_disable();
                demote_segment_4k(mm, addr);
                local_irq_enable();

                i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
                nw = PTRS_PER_PTE - i;
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;

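                /*
                 * Drop mmap_sem around the copy from user space: the
                 * copy may fault, and the page fault path needs to take
                 * mmap_sem itself.
                 */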
                up_write(&mm->mmap_sem);
                if (__copy_from_user(spp, map, nw * sizeof(u32)))
                        return -EFAULT;
                map += nw;
                down_write(&mm->mmap_sem);

                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
        }
        if (limit > spt->maxaddr)
                spt->maxaddr = limit;
        err = 0;
 out:
        up_write(&mm->mmap_sem);
        return err;
}