/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/current.h>
#include <asm/page.h>

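/*
 * Walk the page tables for a user address and check that the page is
 * present and already marked young, writable and dirty, so that writing
 * through the kernel mapping with a plain memcpy()/memset() cannot bypass
 * any fault handling or dirty accounting.  On success the PTE is returned
 * mapped and locked; the caller must release it with pte_unmap_unlock().
 */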
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
        unsigned long addr = (unsigned long)_addr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        pud_t *pud;
        spinlock_t *ptl;

        pgd = pgd_offset(current->mm, addr);
        if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
                return 0;

        pud = pud_offset(pgd, addr);
        if (unlikely(pud_none(*pud) || pud_bad(*pud)))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
                return 0;

        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
        if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
            !pte_write(*pte) || !pte_dirty(*pte))) {
                pte_unmap_unlock(pte, ptl);
                return 0;
        }

        *ptep = pte;
        *ptlp = ptl;

        return 1;
}

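/*
 * Copy to user space with a plain memcpy() through the kernel mapping of
 * the destination.  Each destination page is pinned with
 * pin_page_for_write() first; when that fails, __put_user() is used to
 * fault the page in (marking its PTE young and dirty) before retrying.
 */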
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
        int atomic;

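        /*
         * With KERNEL_DS selected the "user" pointer is really a kernel
         * address, so a straight memcpy() is all that is needed.
         */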
        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                memcpy((void *)to, from, n);
                return 0;
        }

        /* the mmap semaphore is taken only if not in an atomic context */
        atomic = in_atomic();

        if (!atomic)
                down_read(&current->mm->mmap_sem);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

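                /*
                 * If the page can't be pinned, drop mmap_sem (unless we
                 * are atomic) and touch the page with __put_user() so the
                 * fault handler brings it in and makes it writable and
                 * dirty.  If that also fails, the address is genuinely
                 * invalid and we give up.
                 */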
                while (!pin_page_for_write(to, &pte, &ptl)) {
                        if (!atomic)
                                up_read(&current->mm->mmap_sem);
                        if (__put_user(0, (char __user *)to))
                                goto out;
                        if (!atomic)
                                down_read(&current->mm->mmap_sem);
                }

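                /* Copy no further than the end of the current page. */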
                tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                memcpy((void *)to, from, tocopy);
                to += tocopy;
                from += tocopy;
                n -= tocopy;

                pte_unmap_unlock(pte, ptl);
        }
        if (!atomic)
                up_read(&current->mm->mmap_sem);

out:
        return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /*
         * This test is kept out of the main function above to keep the
         * overhead for small copies low: it avoids dumping a large set
         * of registers on the stack just to reload them right away.
         * With frame pointers disabled, tail call optimization kicks in
         * as well, making this test almost invisible.
         */
        if (n < 64)
                return __copy_to_user_std(to, from, n);
        return __copy_to_user_memcpy(to, from, n);
}

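/*
 * Zero user memory with memset() on the kernel mapping, pinning each
 * destination page via pin_page_for_write() just like
 * __copy_to_user_memcpy() above.
 */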
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                memset((void *)addr, 0, n);
                return 0;
        }

        down_read(&current->mm->mmap_sem);
        while (n) {
                pte_t *pte;
                spinlock_t *ptl;
                int tocopy;

                while (!pin_page_for_write(addr, &pte, &ptl)) {
                        up_read(&current->mm->mmap_sem);
                        if (__put_user(0, (char __user *)addr))
                                goto out;
                        down_read(&current->mm->mmap_sem);
                }

                tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
                if (tocopy > n)
                        tocopy = n;

                memset((void *)addr, 0, tocopy);
                addr += tocopy;
                n -= tocopy;

                pte_unmap_unlock(pte, ptl);
        }
        up_read(&current->mm->mmap_sem);

out:
        return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
        /* See rationale for this in __copy_to_user() above. */
        if (n < 64)
                return __clear_user_std(addr, n);
        return __clear_user_memset(addr, n);
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A threshold determined at run time
 * would imply some (small but non-zero) overhead, and so far measurements
 * on the targets of interest haven't shown a worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

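/*
 * Time the memcpy/memset based routines against the standard ones over a
 * range of sizes, using sched_clock() deltas, to help pick the thresholds
 * used above.
 */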
static int __init test_size_threshold(void)
{
        struct page *src_page, *dst_page;
        void *user_ptr, *kernel_ptr;
        unsigned long long t0, t1, t2;
        int size, ret;

        ret = -ENOMEM;
        src_page = alloc_page(GFP_KERNEL);
        if (!src_page)
                goto no_src;
        dst_page = alloc_page(GFP_KERNEL);
        if (!dst_page)
                goto no_dst;
        kernel_ptr = page_address(src_page);
        user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
        if (!user_ptr)
                goto no_vmap;

        /* warm up the src page dcache */
        ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
                t1 = sched_clock();
                ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
                t2 = sched_clock();
                printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        for (size = PAGE_SIZE; size >= 4; size /= 2) {
                t0 = sched_clock();
                ret |= __clear_user_memset(user_ptr, size);
                t1 = sched_clock();
                ret |= __clear_user_std(user_ptr, size);
                t2 = sched_clock();
                printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
        }

        if (ret)
                ret = -EFAULT;

        vunmap(user_ptr);
no_vmap:
        put_page(dst_page);
no_dst:
        put_page(src_page);
no_src:
        return ret;
}

subsys_initcall(test_size_threshold);

#endif