// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

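/*
 * Try to pin down the page backing the user address @_addr for a
 * direct write.  Returns 1 on success with the page table lock held:
 * either *ptep points to the mapped and locked pte, or *ptep is NULL
 * and *ptlp is the mm-wide lock covering a huge page mapping.
 * Returns 0 if the caller must first fault the page in.
 */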
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
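	/*
	 * The page must be present, young, writable and dirty for the
	 * direct memcpy to proceed without faulting; otherwise back out
	 * and let the caller resolve it via __put_user().
	 */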
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

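/*
 * Copy to user space by pinning each destination page and writing it
 * with an ordinary memcpy.  For large copies this beats the
 * word-at-a-time __copy_to_user_std() path on the targets this file
 * is built for.
 */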
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

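	/*
	 * With the address limit set to KERNEL_DS (set_fs()), the "user"
	 * pointer is really a kernel address: a plain memcpy will do.
	 */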
	if (uaccess_kernel()) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

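		/*
		 * If the page cannot be pinned (not present, not writable,
		 * or clean), write a single zero byte through the normal
		 * uaccess path: the fault it triggers brings the page in
		 * and marks it dirty, after which pinning can succeed.
		 */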
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

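		/* number of bytes left in the current page */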
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

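		/* a NULL pte means pin_page_for_write() took the huge page path */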
		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well, making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
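		/*
		 * uaccess_mask_range_ptr() clamps the user pointer range
		 * under speculation (Spectre variant 1) before the
		 * memcpy-based path dereferences it.
		 */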
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

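/*
 * Clear user memory with the same page-pinning strategy as
 * __copy_to_user_memcpy(), using memset().  Unlike the copy path,
 * this always takes mmap_sem and so must not be called from atomic
 * context.
 */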
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (uaccess_kernel()) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

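		/* fault the page in (and dirty it) with __put_user(), then retry */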
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

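		/* bytes remaining in this page */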
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined threshold
 * would imply some (small but non-zero) overhead, and so far the
 * measurements on the targets concerned didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>
#include <linux/sched/clock.h>	/* assumed needed here for sched_clock() */

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk(KERN_INFO "copy_to_user: %d %llu %llu\n",
		       size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk(KERN_INFO "clear_user: %d %llu %llu\n",
		       size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif