linux/arch/s390/lib/uaccess_pt.c
/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006, 2012
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

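/*
 * Select the 31-bit or 64-bit forms of the instructions used in the
 * inline assembly below: "ahi"/"aghi" add a halfword immediate,
 * "slr"/"slgr" do a subtract logical between registers.
 */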
#ifndef CONFIG_64BIT
#define AHI     "ahi"
#define SLR     "slr"
#else
#define AHI     "aghi"
#define SLR     "slgr"
#endif

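/*
 * String length in kernel space, done with the SRST (search string)
 * instruction; register 0 holds the character searched for (NUL).
 * The result includes the terminating NUL, i.e. it is roughly
 * strnlen(src, count) + 1.  On a fault the exception table resumes
 * at label 1 and 0 is returned.
 */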
static size_t strnlen_kernel(size_t count, const char __user *src)
{
        register unsigned long reg0 asm("0") = 0UL;
        unsigned long tmp1, tmp2;

        asm volatile(
                "   la    %2,0(%1)\n"
                "   la    %3,0(%0,%1)\n"
                "  "SLR"  %0,%0\n"
                "0: srst  %3,%2\n"
                "   jo    0b\n"
                "   la    %0,1(%3)\n"   /* strnlen_kernel result includes \0 */
                "  "SLR"  %0,%1\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
                : "d" (reg0) : "cc", "memory");
        return count;
}

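/*
 * Copy count bytes with MVC, 256 bytes at a time, using EX (execute)
 * to run a length-adjusted MVC for the tail.  When a block copy
 * faults, the fixup at label 0 re-runs the copy byte-wise so that the
 * exact number of uncopied bytes can be determined; that number is
 * returned, 0 on success.
 */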
static size_t copy_in_kernel(size_t count, void __user *to,
                             const void __user *from)
{
        unsigned long tmp1;

        asm volatile(
                "  "AHI"  %0,-1\n"
                "   jo    5f\n"
                "   bras  %3,3f\n"
                "0:"AHI"  %0,257\n"
                "1: mvc   0(1,%1),0(%2)\n"
                "   la    %1,1(%1)\n"
                "   la    %2,1(%2)\n"
                "  "AHI"  %0,-1\n"
                "   jnz   1b\n"
                "   j     5f\n"
                "2: mvc   0(256,%1),0(%2)\n"
                "   la    %1,256(%1)\n"
                "   la    %2,256(%2)\n"
                "3:"AHI"  %0,-256\n"
                "   jnm   2b\n"
                "4: ex    %0,1b-0b(%3)\n"
                "5:"SLR"  %0,%0\n"
                "6:\n"
                EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
                : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
                : : "cc", "memory");
        return count;
}

/*
 * Returns the kernel address for a user virtual address. If the returned
 * address is >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred
 * and the address contains the (negative) exception code.
 */
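/*
 * The exception codes mirror the program-interruption codes hardware
 * DAT would deliver: 0x04 protection, 0x10 segment translation, 0x11
 * page translation, 0x39/0x3a/0x3b region first/second/third
 * translation.  __handle_fault() gets them passed back positive.
 */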
#ifdef CONFIG_64BIT
static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table = table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x39UL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION2:
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3aUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_REGION3:
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INVALID))
                        return -0x3bUL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                /* fallthrough */
        case _ASCE_TYPE_SEGMENT:
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                        return -0x10UL;
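                /*
                 * EDAT large segment (1 MB page): there is no page
                 * table level; the low 20 address bits are the byte
                 * offset within the segment.
                 */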
                if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
                        if (write && (*table & _SEGMENT_ENTRY_PROTECT))
                                return -0x04UL;
                        return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
                                (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
                }
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        }
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#else /* CONFIG_64BIT */

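/*
 * The 31-bit walk always has two levels: an 11-bit segment index
 * followed by an 8-bit page index.
 */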
static unsigned long follow_table(struct mm_struct *mm,
                                  unsigned long address, int write)
{
        unsigned long *table = (unsigned long *)__pa(mm->pgd);

        table = table + ((address >> 20) & 0x7ff);
        if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
                return -0x10UL;
        table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
        table = table + ((address >> 12) & 0xff);
        if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
        if (write && (*table & _PAGE_PROTECT))
                return -0x04UL;
        return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */

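/*
 * Copy between a user address and a kernel buffer by walking the page
 * tables by hand: translate the user address under page_table_lock,
 * memcpy() through the kernel mapping one page at a time, and on a
 * failed translation drop the lock, let __handle_fault() resolve the
 * fault and retry.  Returns the number of bytes not copied.
 */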
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, size, kaddr;
        void *from, *to;

        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, write_user);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                size = min(n - done, PAGE_SIZE - offset);
                if (write_user) {
                        to = (void *) kaddr;
                        from = kptr + done;
                } else {
                        from = (void *) kaddr;
                        to = kptr + done;
                }
                memcpy(to, from, size);
                done += size;
                uaddr += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, write_user))
                return n - done;
        goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
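/*
 * Note that on a fault the lock is dropped while __handle_fault() runs
 * and then reacquired, so callers must not rely on it having been held
 * continuously.  Returns 0 if the fault could not be resolved.
 */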
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
                                                     int write)
{
        struct mm_struct *mm = current->mm;
        unsigned long kaddr;
        int rc;

retry:
        kaddr = follow_table(mm, uaddr, write);
        if (IS_ERR_VALUE(kaddr))
                goto fault;

        return kaddr;
fault:
        spin_unlock(&mm->page_table_lock);
        rc = __handle_fault(uaddr, -kaddr, write);
        spin_lock(&mm->page_table_lock);
        if (!rc)
                goto retry;
        return 0;
}

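/*
 * The uaccess entry points below dispatch on the address space: with
 * get_fs() == KERNEL_DS both operands are kernel addresses and the
 * MVC-based kernel variants are used; otherwise the page table walk
 * performs the user-space translation.  As the copy_from_user()
 * contract requires, any uncopied tail of the kernel buffer is zeroed.
 */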
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
        size_t rc;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, (void __user *) to, from);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, (void __user *) from);
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

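/*
 * Clear user memory by copying from empty_zero_page, at most one page
 * per iteration.  Returns the number of bytes not cleared.
 */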
static size_t clear_user_pt(size_t n, void __user *to)
{
        void *zpage = (void *) empty_zero_page;
        long done, size, ret;

        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
                if (segment_eq(get_fs(), KERNEL_DS))
                        ret = copy_in_kernel(size, to, (void __user *) zpage);
                else
                        ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
                to += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
        return 0;
}

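/*
 * Walk the user string page by page and run strnlen() on the kernel
 * mapping of each chunk.  The returned length includes the
 * terminating NUL (count + 1 if no NUL was found within count bytes);
 * 0 is returned for an unresolvable fault.
 */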
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, len, kaddr;
        size_t len_str;

        if (unlikely(!count))
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
                return strnlen_kernel(count, src);
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                kaddr = follow_table(mm, uaddr, 0);
                if (IS_ERR_VALUE(kaddr))
                        goto fault;

                offset = uaddr & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                len_str = strnlen((char *) kaddr, len);
                done += len_str;
                uaddr += len_str;
        } while ((len_str == len) && (done < count));
        spin_unlock(&mm->page_table_lock);
        return done + 1;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -kaddr, 0))
                return 0;
        goto retry;
}

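/*
 * Copy a NUL-terminated user string in page-bounded chunks, running
 * strnlen() on the data just copied to detect the terminator.
 * Returns the string length excluding the NUL (at most count), or
 * -EFAULT on a fault.
 */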
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                   char *dst)
{
        size_t done, len, offset, len_str;

        if (unlikely(!count))
                return 0;
        done = 0;
        do {
                offset = (size_t)src & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                if (segment_eq(get_fs(), KERNEL_DS)) {
                        if (copy_in_kernel(len, (void __user *) dst, src))
                                return -EFAULT;
                } else {
                        if (__user_copy_pt((unsigned long) src, dst, len, 0))
                                return -EFAULT;
                }
                len_str = strnlen(dst, len);
                done += len_str;
                src += len_str;
                dst += len_str;
        } while ((len_str == len) && (done < count));
        return done;
}

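/*
 * Copy between two user addresses: translate source and destination
 * separately and memcpy() between the kernel mappings.  Each chunk is
 * bounded by whichever operand is closer to its page boundary; the
 * fault path tracks which of the two addresses failed so that
 * __handle_fault() resolves the right one.
 */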
static size_t copy_in_user_pt(size_t n, void __user *to,
                              const void __user *from)
{
        struct mm_struct *mm = current->mm;
        unsigned long offset_max, uaddr, done, size, error_code;
        unsigned long uaddr_from = (unsigned long) from;
        unsigned long uaddr_to = (unsigned long) to;
        unsigned long kaddr_to, kaddr_from;
        int write_user;

        if (segment_eq(get_fs(), KERNEL_DS))
                return copy_in_kernel(n, to, from);
        done = 0;
retry:
        spin_lock(&mm->page_table_lock);
        do {
                write_user = 0;
                uaddr = uaddr_from;
                kaddr_from = follow_table(mm, uaddr_from, 0);
                error_code = kaddr_from;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                write_user = 1;
                uaddr = uaddr_to;
                kaddr_to = follow_table(mm, uaddr_to, 1);
                error_code = (unsigned long) kaddr_to;
                if (IS_ERR_VALUE(error_code))
                        goto fault;

                offset_max = max(uaddr_from & ~PAGE_MASK,
                                 uaddr_to & ~PAGE_MASK);
                size = min(n - done, PAGE_SIZE - offset_max);

                memcpy((void *) kaddr_to, (void *) kaddr_from, size);
                done += size;
                uaddr_from += size;
                uaddr_to += size;
        } while (done < n);
        spin_unlock(&mm->page_table_lock);
        return n - done;
fault:
        spin_unlock(&mm->page_table_lock);
        if (__handle_fault(uaddr, -error_code, write_user))
                return n - done;
        goto retry;
}

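/*
 * Apply an arithmetic/logical operation to a user word atomically:
 * load the old value, compute the new value with "insn" and retry
 * with CS (compare and swap) until the word was not changed
 * concurrently.  ret is preloaded with -EFAULT and only set to 0 on
 * success, so a fault taken through the exception table yields
 * -EFAULT.
 */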
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)      \
        asm volatile("0: l   %1,0(%6)\n"                                \
                     "1: " insn                                         \
                     "2: cs  %1,%2,0(%6)\n"                             \
                     "3: jl  1b\n"                                      \
                     "   lhi %0,0\n"                                    \
                     "4:\n"                                             \
                     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)    \
                     : "=d" (ret), "=&d" (oldval), "=&d" (newval),      \
                       "=m" (*uaddr)                                    \
                     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),         \
                       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int oldval = 0, newval, ret;

        switch (op) {
        case FUTEX_OP_SET:
                __futex_atomic_op("lr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ADD:
                __futex_atomic_op("lr %2,%1\nar %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_OR:
                __futex_atomic_op("lr %2,%1\nor %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        case FUTEX_OP_ANDN:
                /* FUTEX_OP_ANDN is *uaddr &= ~oparg */
                __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
                                  ret, oldval, newval, uaddr, ~oparg);
                break;
        case FUTEX_OP_XOR:
                __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
                                  ret, oldval, newval, uaddr, oparg);
                break;
        default:
                ret = -ENOSYS;
        }
        if (ret == 0)
                *old = oldval;
        return ret;
}

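/*
 * Translate the futex address under page_table_lock and pin the page
 * with get_page() so it cannot go away once the lock is dropped, then
 * run the operation on the kernel mapping.
 */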
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_op_pt(op, uaddr, oparg, old);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
        put_page(virt_to_page(uaddr));
        return ret;
}

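/*
 * Compare-and-exchange on a user word with a single CS instruction.
 * ret is preloaded with -EFAULT and set to 0 once the CS completes;
 * CS leaves the current value of the word in oldval either way, which
 * is passed back through *uval.
 */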
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                                     u32 oldval, u32 newval)
{
        int ret;

        asm volatile("0: cs   %1,%4,0(%5)\n"
                     "1: la   %0,0\n"
                     "2:\n"
                     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
                     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
                     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
                     : "cc", "memory" );
        *uval = oldval;
        return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
                            u32 oldval, u32 newval)
{
        int ret;

        if (segment_eq(get_fs(), KERNEL_DS))
                return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        spin_lock(&current->mm->page_table_lock);
        uaddr = (u32 __force __user *)
                __dat_user_addr((__force unsigned long) uaddr, 1);
        if (!uaddr) {
                spin_unlock(&current->mm->page_table_lock);
                return -EFAULT;
        }
        get_page(virt_to_page(uaddr));
        spin_unlock(&current->mm->page_table_lock);
        ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
        put_page(virt_to_page(uaddr));
        return ret;
}

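/*
 * The ops vector wired up when user copies must be done by page table
 * walk, i.e. when the hardware-assisted methods are not available
 * (see the comment at the top of this file).
 */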
struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
        .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
        .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
        .strncpy_from_user      = strncpy_from_user_pt,
        .futex_atomic_op        = futex_atomic_op_pt,
        .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
};