linux/arch/arm/lib/uaccess_with_memcpy.c
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

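/*
 * Walk the current process' page tables and, if the page containing
 * @_addr is present, young, writable and dirty, return 1 with the
 * relevant page table lock held so the page cannot change underneath
 * us.  On success, *ptep is the locked pte (or NULL for a huge page,
 * which is covered by mm->page_table_lock instead) and *ptlp is the
 * lock to release.  Returns 0 if the caller must first fault the
 * page in.
 */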
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
        unsigned long addr = (unsigned long)_addr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        pud_t *pud;
        spinlock_t *ptl;

        pgd = pgd_offset(current->mm, addr);
        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
                return 0;

        pud = pud_offset(pgd, addr);
        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (unlikely(pmd_none(*pmd)))
                return 0;

        /*
         * A pmd can be bad if it refers to a HugeTLB or THP page.
         *
         * Both THP and HugeTLB pages have the same pmd layout
         * and should not be manipulated by the pte functions.
         *
         * Lock the page table for the destination and check
         * to see that it's still huge and whether or not we will
         * need to fault on write, or if we have a splitting THP.
         */
        if (unlikely(pmd_thp_or_huge(*pmd))) {
                ptl = &current->mm->page_table_lock;
                spin_lock(ptl);
                if (unlikely(!pmd_thp_or_huge(*pmd)
                        || pmd_hugewillfault(*pmd)
                        || pmd_trans_splitting(*pmd))) {
                        spin_unlock(ptl);
                        return 0;
                }

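                /*
                 * Signal the huge page case to the caller with a NULL
                 * pte: the caller must then drop page_table_lock with
                 * spin_unlock() rather than pte_unmap_unlock().
                 */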
                *ptep = NULL;
                *ptlp = ptl;
                return 1;
        }

        if (unlikely(pmd_bad(*pmd)))
                return 0;

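        /*
         * Map and lock the pte, then insist that the page is present,
         * young, writable and dirty.  Since ARM manages the access and
         * dirty bits in software, this guarantees that the memcpy()
         * below cannot fault and needs no pte updates while the lock
         * is held.
         */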
        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
        if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
            !pte_write(*pte) || !pte_dirty(*pte))) {
                pte_unmap_unlock(pte, ptl);
                return 0;
        }

        *ptep = pte;
        *ptlp = ptl;

        return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
        int atomic;

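        /*
         * With the address limit set to KERNEL_DS, "to" is really a
         * kernel address: there are no user page tables to pin, so a
         * plain memcpy() is all that is needed.
         */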
        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                memcpy((void *)to, from, n);
                return 0;
        }

        /* the mmap semaphore is taken only if not in an atomic context */
        atomic = in_atomic();

        if (!atomic)
                down_read(&current->mm->mmap_sem);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

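                /*
                 * If the page can't be pinned yet, fault it in (and
                 * mark it young and dirty) by writing a single byte
                 * with __put_user().  The mmap semaphore must be
                 * dropped first since the fault handler needs to
                 * take it.
                 */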
                while (!pin_page_for_write(to, &pte, &ptl)) {
                        if (!atomic)
                                up_read(&current->mm->mmap_sem);
                        if (__put_user(0, (char __user *)to))
                                goto out;
                        if (!atomic)
                                down_read(&current->mm->mmap_sem);
                }

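                /*
                 * Bytes left in the current page:
                 * (~to & ~PAGE_MASK) + 1.  E.g. with 4K pages and
                 * to == 0x1ffc, ~to & 0xfff == 3, so tocopy == 4,
                 * taking us exactly to the next page boundary.
                 */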
                tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                memcpy((void *)to, from, tocopy);
                to += tocopy;
                from += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        if (!atomic)
                up_read(&current->mm->mmap_sem);

out:
        return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /*
         * This test is kept out of the main function above so that
         * small copies avoid the overhead of dumping a large set of
         * registers on the stack just to reload them right away.
         * With the frame pointer disabled, tail call optimization
         * kicks in as well, making this test almost invisible.
         */
        if (n < 64)
                return __copy_to_user_std(to, from, n);
        return __copy_to_user_memcpy(to, from, n);
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                memset((void *)addr, 0, n);
                return 0;
        }

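        /*
         * Same page pinning and fault-in dance as in
         * __copy_to_user_memcpy() above, except that the mmap
         * semaphore is taken unconditionally: this path is presumably
         * never entered from atomic context.
         */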
        down_read(&current->mm->mmap_sem);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(addr, &pte, &ptl)) {
                        up_read(&current->mm->mmap_sem);
                        if (__put_user(0, (char __user *)addr))
                                goto out;
                        down_read(&current->mm->mmap_sem);
                }

                tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                memset((void *)addr, 0, tocopy);
                addr += tocopy;
                n -= tocopy;

                if (pte)
                        pte_unmap_unlock(pte, ptl);
                else
                        spin_unlock(ptl);
        }
        up_read(&current->mm->mmap_sem);

out:
        return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
        /* See the rationale for this in __copy_to_user() above. */
        if (n < 64)
                return __clear_user_std(addr, n);
        return __clear_user_memset(addr, n);
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined variable
 * threshold would imply some overhead (small, but still overhead), and
 * so far measurements on the targets of interest haven't shown enough
 * variation to make that worthwhile.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
        struct page *src_page, *dst_page;
        void *user_ptr, *kernel_ptr;
        unsigned long long t0, t1, t2;
        int size, ret;

        ret = -ENOMEM;
        src_page = alloc_page(GFP_KERNEL);
        if (!src_page)
                goto no_src;
        dst_page = alloc_page(GFP_KERNEL);
        if (!dst_page)
                goto no_dst;
        kernel_ptr = page_address(src_page);
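        /*
         * Map the destination page through vmap() so that it is
         * reached via a separate kernel virtual address rather than
         * its direct mapping; __P010 (the protection map entry for a
         * private, write-only mapping) is apparently chosen to
         * approximate a writable userland mapping.
         */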
        user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
        if (!user_ptr)
                goto no_vmap;

        /* warm up the src page dcache */
        ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

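        /*
         * Time both implementations for power-of-two sizes from
         * PAGE_SIZE down to 4 bytes; each line printed is the size,
         * the memcpy-based time, then the standard implementation's
         * time.
         */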
        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
                t1 = sched_clock();
                ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
                t2 = sched_clock();
                printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __clear_user_memset(user_ptr, size);
                t1 = sched_clock();
                ret |= __clear_user_std(user_ptr, size);
                t2 = sched_clock();
                printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        if (ret)
                ret = -EFAULT;

        vunmap(user_ptr);
no_vmap:
        put_page(dst_page);
no_dst:
        put_page(src_page);
no_src:
        return ret;
}

subsys_initcall(test_size_threshold);

#endif