linux/arch/x86/kernel/vsyscall_64.c
/*
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
 *
 *  Thanks to hpa@transmeta.com for some useful hints.
 *  Special thanks to Ingo Molnar for his early experience with
 *  a different vsyscall implementation for Linux/IA32 and for the name.
 *
 *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
 *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
 *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
 *  jumping out of line if necessary. We cannot add more with this
 *  mechanism because older kernels won't return -ENOSYS.
 *
 *  Note: the concept clashes with User Mode Linux.  UML users should
 *  use the vDSO.
 */
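
/*
 * Concretely (a sketch, assuming the historical x86-64 layout where
 * VSYSCALL_START == 0xffffffffff600000, i.e. -10MB):
 *
 *      0xffffffffff600000      vsyscall 0: gettimeofday()
 *      0xffffffffff600400      vsyscall 1: time()
 *      0xffffffffff600800      vsyscall 2: getcpu()
 */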

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/seqlock.h>
#include <linux/jiffies.h>
#include <linux/sysctl.h>
#include <linux/topology.h>
#include <linux/timekeeper_internal.h>
#include <linux/getcpu.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
#include <asm/compat.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

DEFINE_VVAR(int, vgetcpu_mode);
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);

static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;

static int __init vsyscall_setup(char *str)
{
        if (str) {
                if (!strcmp("emulate", str))
                        vsyscall_mode = EMULATE;
                else if (!strcmp("native", str))
                        vsyscall_mode = NATIVE;
                else if (!strcmp("none", str))
                        vsyscall_mode = NONE;
                else
                        return -EINVAL;

                return 0;
        }

        return -EINVAL;
}
early_param("vsyscall", vsyscall_setup);
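
/*
 * Usage sketch: the mode is fixed once at boot from the kernel command
 * line.  With "vsyscall=none" any use of the legacy entry points
 * SIGSEGVs (old static binaries will crash), "vsyscall=native" maps
 * the page executable, and the default, "vsyscall=emulate", traps each
 * call into emulate_vsyscall() below.
 */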

void update_vsyscall_tz(void)
{
        vsyscall_gtod_data.sys_tz = sys_tz;
}

void update_vsyscall(struct timekeeper *tk)
{
        struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;

        write_seqcount_begin(&vdata->seq);

        /* copy vsyscall data */
        vdata->clock.vclock_mode        = tk->clock->archdata.vclock_mode;
        vdata->clock.cycle_last         = tk->clock->cycle_last;
        vdata->clock.mask               = tk->clock->mask;
        vdata->clock.mult               = tk->mult;
        vdata->clock.shift              = tk->shift;

        vdata->wall_time_sec            = tk->xtime_sec;
        vdata->wall_time_snsec          = tk->xtime_nsec;

        vdata->monotonic_time_sec       = tk->xtime_sec
                                        + tk->wall_to_monotonic.tv_sec;
        vdata->monotonic_time_snsec     = tk->xtime_nsec
                                        + (tk->wall_to_monotonic.tv_nsec
                                                << tk->shift);
        /*
         * The snsec fields hold "shifted nanoseconds" (ns << tk->shift),
         * as does tk->xtime_nsec; carry whole seconds out so the field
         * stays below one second.
         */
        while (vdata->monotonic_time_snsec >=
                                        (((u64)NSEC_PER_SEC) << tk->shift)) {
                vdata->monotonic_time_snsec -=
                                        ((u64)NSEC_PER_SEC) << tk->shift;
                vdata->monotonic_time_sec++;
        }

        vdata->wall_time_coarse.tv_sec  = tk->xtime_sec;
        vdata->wall_time_coarse.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);

        vdata->monotonic_time_coarse    = timespec_add(vdata->wall_time_coarse,
                                                        tk->wall_to_monotonic);

        write_seqcount_end(&vdata->seq);
}
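
/*
 * Reader side, for context: the consumers live in the vDSO/vclock
 * code, not in this file.  A minimal sketch of the lockless read
 * protocol against vsyscall_gtod_data (variable names below are
 * illustrative):
 *
 *      unsigned seq;
 *      u64 sec, snsec;
 *      do {
 *              seq   = read_seqcount_begin(&vsyscall_gtod_data.seq);
 *              sec   = vsyscall_gtod_data.wall_time_sec;
 *              snsec = vsyscall_gtod_data.wall_time_snsec;
 *      } while (read_seqcount_retry(&vsyscall_gtod_data.seq, seq));
 */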

static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
                              const char *message)
{
        if (!show_unhandled_signals)
                return;

        pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
                              level, current->comm, task_pid_nr(current),
                              message, regs->ip, regs->cs,
                              regs->sp, regs->ax, regs->si, regs->di);
}

static int addr_to_vsyscall_nr(unsigned long addr)
{
        int nr;

        if ((addr & ~0xC00UL) != VSYSCALL_START)
                return -EINVAL;

        nr = (addr & 0xC00UL) >> 10;
        if (nr >= 3)
                return -EINVAL;

        return nr;
}
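
/*
 * Worked example, assuming VSYSCALL_START == 0xffffffffff600000 (the
 * historical -10MB slot): masking off bits 10-11 must reproduce
 * VSYSCALL_START exactly, so only base+0x0, +0x400, +0x800 and +0xC00
 * get past the first check.  For 0xffffffffff600800, addr & 0xC00 ==
 * 0x800 and 0x800 >> 10 == 2, so that address decodes to vsyscall
 * nr 2 (getcpu); the fourth slot (nr 3) is unused and rejected.
 */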

static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
        /*
         * XXX: if access_ok, get_user, and put_user handled
         * sig_on_uaccess_error, this could go away.
         */

        if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
                siginfo_t info;
                struct thread_struct *thread = &current->thread;

                thread->error_code      = 6;  /* user fault, no page, write */
                thread->cr2             = ptr;
                thread->trap_nr         = X86_TRAP_PF;

                memset(&info, 0, sizeof(info));
                info.si_signo           = SIGSEGV;
                info.si_errno           = 0;
                info.si_code            = SEGV_MAPERR;
                info.si_addr            = (void __user *)ptr;

                force_sig_info(SIGSEGV, &info, current);
                return false;
        } else {
                return true;
        }
}

bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
        struct task_struct *tsk;
        unsigned long caller;
        int vsyscall_nr, syscall_nr, tmp;
        int prev_sig_on_uaccess_error;
        long ret;

        /*
         * No point in checking CS -- the only way to get here is a user mode
         * trap to a high address, which means that we're in 64-bit user code.
         */

        WARN_ON_ONCE(address != regs->ip);

        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
                return false;
        }

        vsyscall_nr = addr_to_vsyscall_nr(address);

        trace_emulate_vsyscall(vsyscall_nr);

        if (vsyscall_nr < 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
                goto sigsegv;
        }

        if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "vsyscall with bad stack (exploit attempt?)");
                goto sigsegv;
        }

        tsk = current;

        /*
         * Check for access_ok violations and find the syscall nr.
         *
         * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
         * 64-bit, so we don't need to special-case it here.  For all the
         * vsyscalls, NULL means "don't write anything" not "write it at
         * address 0".
         */
        switch (vsyscall_nr) {
        case 0:
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_gettimeofday;
                break;

        case 1:
                if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_time;
                break;

        case 2:
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_getcpu;
                break;
        }
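
        /*
         * Concretely: gettimeofday(&tv, NULL) passes the checks above,
         * since NULL is access_ok-valid, and the real sys_gettimeofday()
         * below simply skips the NULL timezone argument.
         */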

        /*
         * Handle seccomp.  regs->ip must be the original value.
         * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
         *
         * We could optimize the seccomp disabled case, but performance
         * here doesn't matter.
         */
        regs->orig_ax = syscall_nr;
        regs->ax = -ENOSYS;
        tmp = secure_computing(syscall_nr);
        if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
                warn_bad_vsyscall(KERN_DEBUG, regs,
                                  "seccomp tried to change syscall nr or ip");
                do_exit(SIGSYS);
        }
        if (tmp)
                goto do_ret;  /* skip requested */

        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
         */
        prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
        current_thread_info()->sig_on_uaccess_error = 1;

        ret = -EFAULT;
        switch (vsyscall_nr) {
        case 0:
                ret = sys_gettimeofday(
                        (struct timeval __user *)regs->di,
                        (struct timezone __user *)regs->si);
                break;

        case 1:
                ret = sys_time((time_t __user *)regs->di);
                break;

        case 2:
                ret = sys_getcpu((unsigned __user *)regs->di,
                                 (unsigned __user *)regs->si,
                                 NULL);
                break;
        }

        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;

check_fault:
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall fault (exploit attempt?)");

                /*
                 * If we failed to generate a signal for any reason,
                 * generate one here.  (This should be impossible.)
                 */
                if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
                                 !sigismember(&tsk->pending.signal, SIGSEGV)))
                        goto sigsegv;

                return true;  /* Don't emulate the ret. */
        }

        regs->ax = ret;

do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
        return true;

sigsegv:
        force_sig(SIGSEGV, current);
        return true;
}
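
/*
 * How we get here, as a userspace-side sketch (illustrative, not part
 * of the kernel): in EMULATE mode the vsyscall page is mapped
 * non-executable, so the call below faults on instruction fetch and
 * the page fault handler hands the address to emulate_vsyscall():
 *
 *      struct timeval tv;
 *      long (*vgtod)(struct timeval *, struct timezone *) =
 *              (void *)0xffffffffff600000UL;   * vsyscall nr 0 *
 *      vgtod(&tv, NULL);
 */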

/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void vsyscall_set_cpu(int cpu)
{
        unsigned long d;
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif
        if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /*
         * Store cpu number in limit so that it can be loaded quickly
         * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
         */
        d = 0x0f40000000000ULL;
        d |= cpu;
        d |= (node & 0xf) << 12;
        d |= (node >> 4) << 48;

        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
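
/*
 * Decode sketch (hedged): userspace vgetcpu() recovers these values
 * either from IA32_TSC_AUX via RDTSCP (node << 12 | cpu, as written
 * above) or from the segment limit via LSL, roughly:
 *
 *      unsigned long p;
 *      asm("lsl %1,%0" : "=r" (p) : "r" (0x7bU));
 *              * 0x7b is assumed here: GDT_ENTRY_PER_CPU * 8 | RPL 3 *
 *      unsigned cpu  = p & 0xfff;  * low 12 bits: CPU number *
 *      unsigned node = p >> 12;    * node bits stored above them *
 */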

static void cpu_vsyscall_init(void *arg)
{
        /* preemption should already be off */
        vsyscall_set_cpu(raw_smp_processor_id());
}

static int
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
        long cpu = (long)arg;

        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);

        return NOTIFY_DONE;
}

void __init map_vsyscall(void)
{
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
        extern char __vvar_page;
        unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);

        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
                     vsyscall_mode == NATIVE
                     ? PAGE_KERNEL_VSYSCALL
                     : PAGE_KERNEL_VVAR);
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
                     (unsigned long)VSYSCALL_START);

        __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
        BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
                     (unsigned long)VVAR_ADDRESS);
}
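
/*
 * Net effect, assuming the usual fixmap layout: the vsyscall page
 * lands at VSYSCALL_START and is executable only under
 * vsyscall=native; otherwise it gets the non-executable VVAR
 * protections, so every call traps into emulate_vsyscall().  The
 * separate VVAR page exposes the DEFINE_VVAR data above
 * (vgetcpu_mode, vsyscall_gtod_data) read-only to userspace.
 */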

static int __init vsyscall_init(void)
{
        BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));

        on_each_cpu(cpu_vsyscall_init, NULL, 1);
        /* notifier priority > KVM */
        hotcpu_notifier(cpu_vsyscall_notifier, 30);

        return 0;
}
__initcall(vsyscall_init);