linux/arch/arm64/kernel/armv8_deprecated.c
<<
>>
Prefs
   1/*
   2 *  Copyright (C) 2014 ARM Limited
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 */
   8
   9#include <linux/cpu.h>
  10#include <linux/init.h>
  11#include <linux/list.h>
  12#include <linux/perf_event.h>
  13#include <linux/sched.h>
  14#include <linux/slab.h>
  15#include <linux/sysctl.h>
  16
  17#include <asm/insn.h>
  18#include <asm/opcodes.h>
  19#include <asm/system_misc.h>
  20#include <asm/traps.h>
  21#include <asm/uaccess.h>
  22#include <asm/cpufeature.h>
  23
  24#define CREATE_TRACE_POINTS
  25#include "trace-events-emulation.h"
  26
  27/*
  28 * The runtime support for deprecated instruction support can be in one of
  29 * following three states -
  30 *
  31 * 0 = undef
  32 * 1 = emulate (software emulation)
  33 * 2 = hw (supported in hardware)
  34 */
/* Runtime handling mode for one deprecated/obsolete instruction. */
enum insn_emulation_mode {
	INSN_UNDEF,	/* no handler registered; instruction is undefined */
	INSN_EMULATE,	/* trapped via undef hooks and emulated in software */
	INSN_HW,	/* executed natively (hardware support turned on) */
};

/* How far along the deprecation path an instruction is. */
enum legacy_insn_status {
	INSN_DEPRECATED,	/* hardware mode still selectable (max = INSN_HW) */
	INSN_OBSOLETE,		/* removed from the architecture; emulate at most */
};

/* Per-instruction emulation driver, one per deprecated instruction class. */
struct insn_emulation_ops {
	const char		*name;		/* sysctl name under abi/ */
	enum legacy_insn_status status;
	struct undef_hook	*hooks;		/* zero-mask-terminated hook array */
	int			(*set_hw_mode)(bool enable);	/* NULL if no HW mode */
};

/* Registration record: ops plus the currently selected mode and its bounds. */
struct insn_emulation {
	struct list_head node;
	struct insn_emulation_ops *ops;
	int current_mode;	/* an enum insn_emulation_mode value */
	int min;		/* lowest mode accepted via sysctl */
	int max;		/* highest mode accepted via sysctl */
};

/* All registered emulations; list and count protected by insn_emulation_lock. */
static LIST_HEAD(insn_emulation);
static int nr_insn_emulated;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
  64
  65static void register_emulation_hooks(struct insn_emulation_ops *ops)
  66{
  67        struct undef_hook *hook;
  68
  69        BUG_ON(!ops->hooks);
  70
  71        for (hook = ops->hooks; hook->instr_mask; hook++)
  72                register_undef_hook(hook);
  73
  74        pr_notice("Registered %s emulation handler\n", ops->name);
  75}
  76
  77static void remove_emulation_hooks(struct insn_emulation_ops *ops)
  78{
  79        struct undef_hook *hook;
  80
  81        BUG_ON(!ops->hooks);
  82
  83        for (hook = ops->hooks; hook->instr_mask; hook++)
  84                unregister_undef_hook(hook);
  85
  86        pr_notice("Removed %s emulation handler\n", ops->name);
  87}
  88
  89static void enable_insn_hw_mode(void *data)
  90{
  91        struct insn_emulation *insn = (struct insn_emulation *)data;
  92        if (insn->ops->set_hw_mode)
  93                insn->ops->set_hw_mode(true);
  94}
  95
  96static void disable_insn_hw_mode(void *data)
  97{
  98        struct insn_emulation *insn = (struct insn_emulation *)data;
  99        if (insn->ops->set_hw_mode)
 100                insn->ops->set_hw_mode(false);
 101}
 102
 103/* Run set_hw_mode(mode) on all active CPUs */
 104static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
 105{
 106        if (!insn->ops->set_hw_mode)
 107                return -EINVAL;
 108        if (enable)
 109                on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
 110        else
 111                on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
 112        return 0;
 113}
 114
/*
 * Run set_hw_mode for all insns on a starting CPU.
 * Called from the CPU hotplug notifier so a newly-onlined CPU matches the
 * currently selected mode of every registered emulation.
 * Returns:
 *  0           - If all the hooks ran successfully.
 * -EINVAL      - At least one hook is not supported by the CPU.
 */
static int run_all_insn_set_hw_mode(unsigned long cpu)
{
	int rc = 0;
	unsigned long flags;
	struct insn_emulation *insn;

	/* Hold the lock so the list cannot change while we walk it. */
	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
	list_for_each_entry(insn, &insn_emulation, node) {
		/* Enable HW support only where HW mode is the active mode. */
		bool enable = (insn->current_mode == INSN_HW);
		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
			pr_warn("CPU[%ld] cannot support the emulation of %s",
				cpu, insn->ops->name);
			rc = -EINVAL;
		}
	}
	raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
	return rc;
}
 139
/*
 * Transition @insn from mode @prev to insn->current_mode: first tear down
 * whatever @prev had installed, then install the new mode.  Returns 0 on
 * success or the error from enabling hardware mode; the caller is
 * responsible for reverting current_mode on failure.
 */
static int update_insn_emulation_mode(struct insn_emulation *insn,
				       enum insn_emulation_mode prev)
{
	int ret = 0;

	/* Tear-down phase for the outgoing mode. */
	switch (prev) {
	case INSN_UNDEF: /* Nothing to be done */
		break;
	case INSN_EMULATE:
		remove_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		if (!run_all_cpu_set_hw_mode(insn, false))
			pr_notice("Disabled %s support\n", insn->ops->name);
		break;
	}

	/* Set-up phase for the incoming mode. */
	switch (insn->current_mode) {
	case INSN_UNDEF:
		break;
	case INSN_EMULATE:
		register_emulation_hooks(insn->ops);
		break;
	case INSN_HW:
		ret = run_all_cpu_set_hw_mode(insn, true);
		if (!ret)
			pr_notice("Enabled %s support\n", insn->ops->name);
		break;
	}

	return ret;
}
 172
 173static void register_insn_emulation(struct insn_emulation_ops *ops)
 174{
 175        unsigned long flags;
 176        struct insn_emulation *insn;
 177
 178        insn = kzalloc(sizeof(*insn), GFP_KERNEL);
 179        insn->ops = ops;
 180        insn->min = INSN_UNDEF;
 181
 182        switch (ops->status) {
 183        case INSN_DEPRECATED:
 184                insn->current_mode = INSN_EMULATE;
 185                /* Disable the HW mode if it was turned on at early boot time */
 186                run_all_cpu_set_hw_mode(insn, false);
 187                insn->max = INSN_HW;
 188                break;
 189        case INSN_OBSOLETE:
 190                insn->current_mode = INSN_UNDEF;
 191                insn->max = INSN_EMULATE;
 192                break;
 193        }
 194
 195        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
 196        list_add(&insn->node, &insn_emulation);
 197        nr_insn_emulated++;
 198        raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
 199
 200        /* Register any handlers if required */
 201        update_insn_emulation_mode(insn, INSN_UNDEF);
 202}
 203
/*
 * sysctl handler for /proc/sys/abi/<insn>.  Reads or writes the emulation
 * mode; on a successful write that changes the mode, tears down the old
 * mode and installs the new one, reverting to the previous mode if the
 * switch fails.
 */
static int emulation_proc_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret = 0;
	struct insn_emulation *insn = (struct insn_emulation *) table->data;
	enum insn_emulation_mode prev_mode = insn->current_mode;

	/*
	 * Temporarily point ->data at the mode int so proc_dointvec_minmax()
	 * parses/prints it directly (bounded by extra1/extra2 = min/max);
	 * restored to the insn pointer before returning.
	 */
	table->data = &insn->current_mode;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write || prev_mode == insn->current_mode)
		goto ret;

	/* NOTE(review): concurrent writers are not serialized here - confirm. */
	ret = update_insn_emulation_mode(insn, prev_mode);
	if (ret) {
		/* Mode change failed, revert to previous mode. */
		insn->current_mode = prev_mode;
		update_insn_emulation_mode(insn, INSN_UNDEF);
	}
ret:
	table->data = insn;
	return ret;
}
 228
/* Root of the sysctl tree: /proc/sys/abi/ (children are attached at init). */
static struct ctl_table ctl_abi[] = {
	{
		.procname = "abi",
		.mode = 0555,
	},
	{ }	/* sentinel */
};
 236
 237static void register_insn_emulation_sysctl(struct ctl_table *table)
 238{
 239        unsigned long flags;
 240        int i = 0;
 241        struct insn_emulation *insn;
 242        struct ctl_table *insns_sysctl, *sysctl;
 243
 244        insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1),
 245                              GFP_KERNEL);
 246
 247        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
 248        list_for_each_entry(insn, &insn_emulation, node) {
 249                sysctl = &insns_sysctl[i];
 250
 251                sysctl->mode = 0644;
 252                sysctl->maxlen = sizeof(int);
 253
 254                sysctl->procname = insn->ops->name;
 255                sysctl->data = insn;
 256                sysctl->extra1 = &insn->min;
 257                sysctl->extra2 = &insn->max;
 258                sysctl->proc_handler = emulation_proc_handler;
 259                i++;
 260        }
 261        raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
 262
 263        table->child = insns_sysctl;
 264        register_sysctl_table(table);
 265}
 266
 267/*
 268 *  Implement emulation of the SWP/SWPB instructions using load-exclusive and
 269 *  store-exclusive.
 270 *
 271 *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
 272 *  Where: Rt  = destination
 273 *         Rt2 = source
 274 *         Rn  = address
 275 */
 276
 277/*
 278 * Error-checking SWP macros implemented using ldxr{b}/stxr{b}
 279 */
 280#define __user_swpX_asm(data, addr, res, temp, B)               \
 281        __asm__ __volatile__(                                   \
 282        "       mov             %w2, %w1\n"                     \
 283        "0:     ldxr"B"         %w1, [%3]\n"                    \
 284        "1:     stxr"B"         %w0, %w2, [%3]\n"               \
 285        "       cbz             %w0, 2f\n"                      \
 286        "       mov             %w0, %w4\n"                     \
 287        "2:\n"                                                  \
 288        "       .pushsection     .fixup,\"ax\"\n"               \
 289        "       .align          2\n"                            \
 290        "3:     mov             %w0, %w5\n"                     \
 291        "       b               2b\n"                           \
 292        "       .popsection"                                    \
 293        "       .pushsection     __ex_table,\"a\"\n"            \
 294        "       .align          3\n"                            \
 295        "       .quad           0b, 3b\n"                       \
 296        "       .quad           1b, 3b\n"                       \
 297        "       .popsection"                                    \
 298        : "=&r" (res), "+r" (data), "=&r" (temp)                \
 299        : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)              \
 300        : "memory")
 301
/* Word and byte instantiations of the swap primitive above. */
#define __user_swp_asm(data, addr, res, temp) \
	__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
	__user_swpX_asm(data, addr, res, temp, "b")

/*
 * Bit 22 of the instruction encoding distinguishes between
 * the SWP and SWPB variants (bit set means SWPB).
 */
#define TYPE_SWPB (1 << 22)
 312
 313/*
 314 * Set up process info to signal segmentation fault - called on access error.
 315 */
 316static void set_segfault(struct pt_regs *regs, unsigned long addr)
 317{
 318        siginfo_t info;
 319
 320        down_read(&current->mm->mmap_sem);
 321        if (find_vma(current->mm, addr) == NULL)
 322                info.si_code = SEGV_MAPERR;
 323        else
 324                info.si_code = SEGV_ACCERR;
 325        up_read(&current->mm->mmap_sem);
 326
 327        info.si_signo = SIGSEGV;
 328        info.si_errno = 0;
 329        info.si_addr  = (void *) instruction_pointer(regs);
 330
 331        pr_debug("SWP{B} emulation: access caused memory abort!\n");
 332        arm64_notify_die("Illegal memory access", regs, &info, 0);
 333}
 334
/*
 * Perform the atomic swap for SWP (word) or SWPB (byte).
 *
 * @address: user virtual address (must be word-aligned for SWP)
 * @data:    in: value to store; out: previous memory contents on success
 * @type:    TYPE_SWPB for the byte variant, 0 for the word variant
 *
 * Returns 0 on success, -EFAULT on misalignment or access fault.
 */
static int emulate_swpX(unsigned int address, unsigned int *data,
			unsigned int type)
{
	unsigned int res = 0;

	if ((type != TYPE_SWPB) && (address & 0x3)) {
		/* SWP to unaligned address not permitted */
		pr_debug("SWP instruction on unaligned pointer!\n");
		return -EFAULT;
	}

	while (1) {
		unsigned long temp;

		if (type == TYPE_SWPB)
			__user_swpb_asm(*data, address, res, temp);
		else
			__user_swp_asm(*data, address, res, temp);

		/* -EAGAIN = lost exclusive reservation: retry unless signalled. */
		if (likely(res != -EAGAIN) || signal_pending(current))
			break;

		/* NOTE(review): retry count is unbounded under contention - confirm. */
		cond_resched();
	}

	return res;
}
 362
 363/*
 364 * swp_handler logs the id of calling process, dissects the instruction, sanity
 365 * checks the memory location, calls emulate_swpX for the actual operation and
 366 * deals with fixup/error handling before returning
 367 */
static int swp_handler(struct pt_regs *regs, u32 instr)
{
	u32 destreg, data, type, address = 0;
	int rn, rt2, res = 0;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	/* Non-zero iff this is the byte variant (SWPB). */
	type = instr & TYPE_SWPB;

	switch (arm_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a SWP, undef */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* Decode the three register operands from the AArch32 encoding. */
	rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
	rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);

	address = (u32)regs->user_regs.regs[rn];
	data	= (u32)regs->user_regs.regs[rt2];
	destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);

	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
		rn, address, destreg,
		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);

	/* Check access in reasonable access range for both SWP and SWPB */
	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
			address);
		goto fault;
	}

	res = emulate_swpX(address, &data, type);
	if (res == -EFAULT)
		goto fault;
	else if (res == 0)
		/* Success: old memory contents land in the destination reg. */
		regs->user_regs.regs[destreg] = data;

ret:
	if (type == TYPE_SWPB)
		trace_instruction_emulation("swpb", regs->pc);
	else
		trace_instruction_emulation("swp", regs->pc);

	pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	/* Handled (or condition failed): skip the 4-byte instruction. */
	regs->pc += 4;
	return 0;

fault:
	set_segfault(regs, address);

	return 0;
}
 431
/*
 * Only emulate SWP/SWPB executed in ARM state/User mode.
 * The kernel must be SWP free and SWP{B} does not exist in Thumb.
 */
static struct undef_hook swp_hooks[] = {
	{
		.instr_mask	= 0x0fb00ff0,
		.instr_val	= 0x01000090,	/* SWP{B}; bit 22 selects the byte form */
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= swp_handler
	},
	{ }	/* sentinel: instr_mask == 0 terminates the array */
};

/* SWP{B} is obsolete: software emulation only, no hardware mode. */
static struct insn_emulation_ops swp_ops = {
	.name = "swp",
	.status = INSN_OBSOLETE,
	.hooks = swp_hooks,
	.set_hw_mode = NULL,
};
 453
/*
 * Emulate the deprecated CP15 barrier operations (mcr p15, 0, Rt, c7, ...)
 * by issuing the equivalent AArch64 barrier, then skip the 4-byte
 * instruction.  Returns 0 if handled, -EFAULT for unconditional encodings,
 * -EINVAL if the condition field cannot be decoded.
 */
static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
{
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);

	switch (arm_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
		/* Condition failed - return to next instruction */
		goto ret;
	case ARM_OPCODE_CONDTEST_UNCOND:
		/* If unconditional encoding - not a barrier instruction */
		return -EFAULT;
	default:
		return -EINVAL;
	}

	/* CRm distinguishes dmb/dsb (10) from isb (5). */
	switch (aarch32_insn_mcr_extract_crm(instr)) {
	case 10:
		/*
		 * dmb - mcr p15, 0, Rt, c7, c10, 5
		 * dsb - mcr p15, 0, Rt, c7, c10, 4
		 */
		if (aarch32_insn_mcr_extract_opc2(instr) == 5) {
			dmb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc);
		} else {
			dsb(sy);
			trace_instruction_emulation(
				"mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc);
		}
		break;
	case 5:
		/*
		 * isb - mcr p15, 0, Rt, c7, c5, 4
		 *
		 * Taking an exception or returning from one acts as an
		 * instruction barrier. So no explicit barrier needed here.
		 */
		trace_instruction_emulation(
			"mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc);
		break;
	}

ret:
	pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
			current->comm, (unsigned long)current->pid, regs->pc);

	regs->pc += 4;
	return 0;
}
 506
/*
 * Read-modify-write SCTLR_EL1 on the calling CPU: clear the bits in
 * @clear, then set the bits in @set.
 */
static inline void config_sctlr_el1(u32 clear, u32 set)
{
	u32 val;

	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
	val &= ~clear;
	val |= set;
	asm volatile("msr sctlr_el1, %0" : : "r" (val));
}
 516
 517static int cp15_barrier_set_hw_mode(bool enable)
 518{
 519        if (enable)
 520                config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
 521        else
 522                config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
 523        return 0;
 524}
 525
/* Trap the CP15 c7 barrier encodings from AArch32 user mode only. */
static struct undef_hook cp15_barrier_hooks[] = {
	{
		/* dmb/dsb forms; exact opc2 is decoded in cp15barrier_handler */
		.instr_mask	= 0x0fff0fdf,
		.instr_val	= 0x0e070f9a,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{
		/* isb form */
		.instr_mask	= 0x0fff0fff,
		.instr_val	= 0x0e070f95,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= cp15barrier_handler,
	},
	{ }	/* sentinel */
};

/* Deprecated but still executable in hardware via SCTLR_EL1.CP15BEN. */
static struct insn_emulation_ops cp15_barrier_ops = {
	.name = "cp15_barrier",
	.status = INSN_DEPRECATED,
	.hooks = cp15_barrier_hooks,
	.set_hw_mode = cp15_barrier_set_hw_mode,
};
 550
 551static int setend_set_hw_mode(bool enable)
 552{
 553        if (!cpu_supports_mixed_endian_el0())
 554                return -EINVAL;
 555
 556        if (enable)
 557                config_sctlr_el1(SCTLR_EL1_SED, 0);
 558        else
 559                config_sctlr_el1(0, SCTLR_EL1_SED);
 560        return 0;
 561}
 562
 563static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
 564{
 565        char *insn;
 566
 567        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
 568
 569        if (big_endian) {
 570                insn = "setend be";
 571                regs->pstate |= COMPAT_PSR_E_BIT;
 572        } else {
 573                insn = "setend le";
 574                regs->pstate &= ~COMPAT_PSR_E_BIT;
 575        }
 576
 577        trace_instruction_emulation(insn, regs->pc);
 578        pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
 579                        current->comm, (unsigned long)current->pid, regs->pc);
 580
 581        return 0;
 582}
 583
 584static int a32_setend_handler(struct pt_regs *regs, u32 instr)
 585{
 586        int rc = compat_setend_handler(regs, (instr >> 9) & 1);
 587        regs->pc += 4;
 588        return rc;
 589}
 590
 591static int t16_setend_handler(struct pt_regs *regs, u32 instr)
 592{
 593        int rc = compat_setend_handler(regs, (instr >> 3) & 1);
 594        regs->pc += 2;
 595        return rc;
 596}
 597
/* Trap SETEND from AArch32 user mode in both ARM and Thumb states. */
static struct undef_hook setend_hooks[] = {
	{
		/* ARM mode */
		.instr_mask	= 0xfffffdff,
		.instr_val	= 0xf1010000,
		.pstate_mask	= COMPAT_PSR_MODE_MASK,
		.pstate_val	= COMPAT_PSR_MODE_USR,
		.fn		= a32_setend_handler,
	},
	{
		/* Thumb mode */
		.instr_mask	= 0x0000fff7,
		.instr_val	= 0x0000b650,
		.pstate_mask	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
		.pstate_val	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
		.fn		= t16_setend_handler,
	},
	{}	/* sentinel */
};

/* Deprecated but still executable in hardware via SCTLR_EL1.SED. */
static struct insn_emulation_ops setend_ops = {
	.name = "setend",
	.status = INSN_DEPRECATED,
	.hooks = setend_hooks,
	.set_hw_mode = setend_set_hw_mode,
};
 623
 624static int insn_cpu_hotplug_notify(struct notifier_block *b,
 625                              unsigned long action, void *hcpu)
 626{
 627        int rc = 0;
 628        if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
 629                rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
 630
 631        return notifier_from_errno(rc);
 632}
 633
/* Keeps newly-onlined CPUs in sync with the selected emulation modes. */
static struct notifier_block insn_cpu_hotplug_notifier = {
	.notifier_call = insn_cpu_hotplug_notify,
};
 637
 638/*
 639 * Invoked as late_initcall, since not needed before init spawned.
 640 */
 641static int __init armv8_deprecated_init(void)
 642{
 643        if (IS_ENABLED(CONFIG_SWP_EMULATION))
 644                register_insn_emulation(&swp_ops);
 645
 646        if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
 647                register_insn_emulation(&cp15_barrier_ops);
 648
 649        if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
 650                if(system_supports_mixed_endian_el0())
 651                        register_insn_emulation(&setend_ops);
 652                else
 653                        pr_info("setend instruction emulation is not supported on the system");
 654        }
 655
 656        register_cpu_notifier(&insn_cpu_hotplug_notifier);
 657        register_insn_emulation_sysctl(ctl_abi);
 658
 659        return 0;
 660}
 661
 662late_initcall(armv8_deprecated_init);
 663