linux/arch/s390/mm/maccess.c
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
        unsigned long count, aligned;
        int offset, mask;
        int rc = -EFAULT;

        aligned = (unsigned long) dst & ~3UL;
        offset = (unsigned long) dst & 3;
        count = min_t(unsigned long, 4 - offset, size);
        mask = (0xf << (4 - count)) & 0xf;
        mask >>= offset;
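        /*
         * bras puts the address of the icm instruction into register 1;
         * the icm is never run directly, only through ex. The aligned
         * word containing dst is loaded into register 0, lra translates
         * the virtual address into the real address that stura needs,
         * ex runs the icm with the computed mask so that only the
         * targeted bytes of register 0 are replaced from src, and
         * stura stores the word back, bypassing DAT and possible
         * write protection.
         */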
        asm volatile(
                "       bras    1,0f\n"
                "       icm     0,0,0(%3)\n"
                "0:     l       0,0(%1)\n"
                "       lra     %1,0(%1)\n"
                "1:     ex      %2,0(1)\n"
                "2:     stura   0,%1\n"
                "       la      %0,0\n"
                "3:\n"
                EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
                : "+d" (rc), "+a" (aligned)
                : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
        return rc ? rc : count;
}

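/*
 * Write to kernel memory while bypassing DAT and write protection.
 * The copy is done in chunks of at most four bytes that never cross
 * a word boundary. Returns 0 on success, -EFAULT otherwise.
 */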
long probe_kernel_write(void *dst, const void *src, size_t size)
{
        long copied = 0;

        while (size) {
                copied = probe_kernel_write_odd(dst, src, size);
                if (copied < 0)
                        break;
                dst += copied;
                src += copied;
                size -= copied;
        }
        return copied < 0 ? -EFAULT : 0;
}

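/*
 * Low level copy helper for memcpy_real. mvcle expects its operands
 * in even/odd register pairs (2/3 for the destination, 4/5 for the
 * source), which is why the variables are pinned to those registers.
 * mvcle is interruptible and may stop with cc == 3 before the copy
 * is complete; the jo loop simply resumes it until it is done.
 */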
static int __memcpy_real(void *dest, void *src, size_t count)
{
        register unsigned long _dest asm("2") = (unsigned long) dest;
        register unsigned long _len1 asm("3") = (unsigned long) count;
        register unsigned long _src  asm("4") = (unsigned long) src;
        register unsigned long _len2 asm("5") = (unsigned long) count;
        int rc = -EFAULT;

        asm volatile (
                "0:     mvcle   %1,%2,0x0\n"
                "1:     jo      0b\n"
                "       lhi     %0,0x0\n"
                "2:\n"
                EX_TABLE(1b,2b)
                : "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
                  "+d" (_len2), "=m" (*((long *) dest))
                : "m" (*((long *) src))
                : "cc", "memory");
        return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
        unsigned long flags;
        int rc;

        if (!count)
                return 0;
        local_irq_save(flags);
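        /*
         * stnsm ANDs the PSW system mask with 0xfb, clearing the DAT
         * bit, so the copy below runs on real addresses. The mask
         * saved by local_irq_save() above still has DAT set, so
         * local_irq_restore() switches it back on.
         */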
        __arch_local_irq_stnsm(0xfbUL);
        rc = __memcpy_real(dest, src, count);
        local_irq_restore(flags);
        return rc;
}

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
        unsigned long cr0, flags, prefix;

        flags = arch_local_irq_save();
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        prefix = store_prefix();
        if (prefix) {
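                /*
                 * With the prefix register set to 0, real addresses
                 * map straight to absolute addresses, so the plain
                 * memcpy() sees absolute memory. Machine checks stay
                 * disabled meanwhile, since the handler would
                 * otherwise store its state into the zero page
                 * instead of this CPU's lowcore.
                 */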
                local_mcck_disable();
                set_prefix(0);
                memcpy(dest, src, count);
                set_prefix(prefix);
                local_mcck_enable();
        } else {
                memcpy(dest, src, count);
        }
        __ctl_load(cr0, 0, 0);
        arch_local_irq_restore(flags);
}

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
        int offs = 0, size, rc;
        char *buf;

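        /*
         * memcpy_real() runs with DAT off and therefore cannot touch
         * user space directly; stage the data through a kernel bounce
         * page instead, one page at a time.
         */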
        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (memcpy_real(buf, src + offs, size))
                        goto out;
                if (copy_to_user(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
        int offs = 0, size, rc;
        char *buf;

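        /*
         * Mirror image of copy_to_user_real(): user data is pulled
         * into a kernel bounce page first, then written out with
         * memcpy_real().
         */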
        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        rc = -EFAULT;
        while (offs < count) {
                size = min(PAGE_SIZE, count - offs);
                if (copy_from_user(buf, src + offs, size))
                        goto out;
                if (memcpy_real(dest + offs, buf, size))
                        goto out;
                offs += size;
        }
        rc = 0;
out:
        free_page((unsigned long) buf);
        return rc;
}

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
        unsigned long lc;
        int cpu;

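        /*
         * Prefixing swaps the lowcore-sized block at absolute address
         * 0 with each CPU's prefix area: an address inside the zero
         * page or inside any online CPU's lowcore does not access the
         * memory it appears to name.
         */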
        if (addr < sizeof(struct _lowcore))
                return 1;
        for_each_online_cpu(cpu) {
                lc = (unsigned long) lowcore_ptr[cpu];
                if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
                        continue;
                return 1;
        }
        return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
{
        void *bounce = (void *) addr;
        unsigned long size;

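        /*
         * get_online_cpus() and preempt_disable() keep the set of
         * lowcore pages checked by is_swapped() stable while the
         * bounce copy is made.
         */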
        get_online_cpus();
        preempt_disable();
        if (is_swapped(addr)) {
                size = PAGE_SIZE - (addr & ~PAGE_MASK);
                bounce = (void *) __get_free_page(GFP_ATOMIC);
                if (bounce)
                        memcpy_absolute(bounce, (void *) addr, size);
        }
        preempt_enable();
        put_online_cpus();
        return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
{
        if ((void *) addr != buf)
                free_page((unsigned long) buf);
}