linux/arch/s390/mm/maccess.c
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>

/*
 * s390_kernel_write_odd() copies at most "size" bytes, but never across an
 * eight byte boundary of the destination: it reads the aligned eight byte
 * word that contains dst into tmp, patches in the requested bytes via an
 * executed mvc and stores the word back with sturg. sturg stores using the
 * real address, so DAT and page table write protection do not apply.
 * The number of bytes actually copied is returned.
 */
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"
		"	mvc	0(1,%4),0(%5)\n"
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination with the sturg instruction,
 * which results in a read-modify-write sequence: in a loop, the function
 * reads eight bytes from the destination at an eight byte boundary, modifies
 * the requested bytes and writes the result back.
 *
 * Note: this function must not be called concurrently on several CPUs with
 *       overlapping destination words, since that may cause data corruption.
 */
void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
	long copied;

	while (size) {
		copied = s390_kernel_write_odd(dst, src, size);
		dst += copied;
		src += copied;
		size -= copied;
	}
}
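
/*
 * Illustrative sketch (not part of the original file): the typical consumer
 * of s390_kernel_write() is code patching, which must store into kernel
 * text that is mapped read-only. Since sturg ignores page table write
 * protection, no temporary writable mapping is needed. The helper name and
 * the halfword operand below are hypothetical.
 */
static void __maybe_unused example_patch_halfword(void *code, u16 insn)
{
	s390_kernel_write(code, &insn, sizeof(insn));
}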

/*
 * __memcpy_real() copies with mvcle, which also works while DAT is
 * disabled. cc 3 means the copy was interrupted, so the jo loops until
 * mvcle is done. If mvcle faults, the exception table entry branches
 * past the "lhi %0,0x0" and the initial -EFAULT is returned.
 */
static int __memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = __arch_local_irq_stnsm(0xf8UL);	/* disable DAT and interrupts */
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	rc = __memcpy_real(dest, src, count);
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}
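
/*
 * Illustrative sketch (not part of the original file): memcpy_real() lets
 * the kernel read memory that has no mapping in the kernel address space,
 * e.g. a register save area at a known real address while creating a dump.
 * The helper name and the address below are hypothetical.
 */
static int __maybe_unused example_read_save_area(void *buf, size_t len)
{
	unsigned long save_area = 0x1000;	/* hypothetical real address */

	return memcpy_real(buf, (void *) save_area, len);
}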

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);	/* prefix 0: real addresses equal absolute */
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
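
/*
 * Illustrative sketch (not part of the original file): update a field in
 * the absolute zero lowcore no matter where this CPU's prefix points. A
 * plain write through &S390_lowcore would be redirected to the prefixed
 * copy; memcpy_absolute() temporarily resets the prefix so that the write
 * lands in absolute memory. The helper and the psw value are hypothetical.
 */
static void __maybe_unused example_set_restart_psw(psw_t psw)
{
	memcpy_absolute(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}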

/*
 * Copy memory from kernel (real) to user (virtual). Since user memory
 * cannot be accessed while DAT is disabled, the data is staged through a
 * bounce page: memcpy_real() into the page, then copy_to_user() out of it,
 * one page at a time.
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
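
/*
 * Illustrative sketch (not part of the original file): a dump reader in
 * the style of the zcore interface would hand whole memory ranges to
 * copy_to_user_real() and let it do the per page staging. All names below
 * are hypothetical.
 */
static ssize_t __maybe_unused example_dump_read(void __user *ubuf,
						unsigned long addr,
						size_t len)
{
	if (copy_to_user_real(ubuf, (void *) addr, len))
		return -EFAULT;
	return len;
}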

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(unsigned long addr)
{
	unsigned long lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = (unsigned long) lowcore_ptr[cpu];
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy
 * of the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *bounce = (void *) addr;
	unsigned long size;

	get_online_cpus();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, (void *) addr, size);
	}
	preempt_enable();
	put_online_cpus();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
	if ((void *) addr != buf)
		free_page((unsigned long) buf);
}
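
/*
 * Illustrative sketch (not part of the original file): a /dev/mem style
 * reader pairs the two functions above. The chunk is assumed not to cross
 * a page boundary, since the bounce buffer only covers the rest of the
 * page that contains addr. All names below are hypothetical.
 */
static ssize_t __maybe_unused example_dev_mem_read(phys_addr_t addr,
						   void *buf, size_t len)
{
	void *ptr = xlate_dev_mem_ptr(addr);

	if (!ptr)
		return -ENOMEM;
	memcpy(buf, ptr, len);
	unxlate_dev_mem_ptr(addr, ptr);
	return len;
}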