/* linux/arch/arm64/kernel/insn.c */
   1/*
   2 * Copyright (C) 2013 Huawei Ltd.
   3 * Author: Jiang Liu <liuj97@gmail.com>
   4 *
   5 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include <linux/bitops.h>
  20#include <linux/bug.h>
  21#include <linux/compiler.h>
  22#include <linux/kernel.h>
  23#include <linux/mm.h>
  24#include <linux/smp.h>
  25#include <linux/spinlock.h>
  26#include <linux/stop_machine.h>
  27#include <linux/types.h>
  28#include <linux/uaccess.h>
  29
  30#include <asm/cacheflush.h>
  31#include <asm/debug-monitors.h>
  32#include <asm/fixmap.h>
  33#include <asm/insn.h>
  34#include <asm/kprobes.h>
  35
#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

/*
 * Top-level A64 encoding classes, indexed by instruction bits [28:25]
 * (see aarch64_get_insn_class() below, which extracts that field).
 */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0000 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0001 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0010 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0011 */
	AARCH64_INSN_CLS_LDST,		/* 0b0100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b0101 */
	AARCH64_INSN_CLS_LDST,		/* 0b0110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b0111 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1000 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1001 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1010 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1011 */
	AARCH64_INSN_CLS_LDST,		/* 0b1100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b1101 */
	AARCH64_INSN_CLS_LDST,		/* 0b1110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b1111 */
};
  58
  59enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  60{
  61        return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  62}
  63
  64/* NOP is an alias of HINT */
  65bool __kprobes aarch64_insn_is_nop(u32 insn)
  66{
  67        if (!aarch64_insn_is_hint(insn))
  68                return false;
  69
  70        switch (insn & 0xFE0) {
  71        case AARCH64_INSN_HINT_XPACLRI:
  72        case AARCH64_INSN_HINT_PACIA_1716:
  73        case AARCH64_INSN_HINT_PACIB_1716:
  74        case AARCH64_INSN_HINT_AUTIA_1716:
  75        case AARCH64_INSN_HINT_AUTIB_1716:
  76        case AARCH64_INSN_HINT_PACIAZ:
  77        case AARCH64_INSN_HINT_PACIASP:
  78        case AARCH64_INSN_HINT_PACIBZ:
  79        case AARCH64_INSN_HINT_PACIBSP:
  80        case AARCH64_INSN_HINT_AUTIAZ:
  81        case AARCH64_INSN_HINT_AUTIASP:
  82        case AARCH64_INSN_HINT_AUTIBZ:
  83        case AARCH64_INSN_HINT_AUTIBSP:
  84        case AARCH64_INSN_HINT_BTI:
  85        case AARCH64_INSN_HINT_BTIC:
  86        case AARCH64_INSN_HINT_BTIJ:
  87        case AARCH64_INSN_HINT_BTIJC:
  88        case AARCH64_INSN_HINT_NOP:
  89                return true;
  90        default:
  91                return false;
  92        }
  93}
  94
  95bool aarch64_insn_is_branch_imm(u32 insn)
  96{
  97        return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
  98                aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
  99                aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
 100                aarch64_insn_is_bcond(insn));
 101}
 102
 103static DEFINE_RAW_SPINLOCK(patch_lock);
 104
 105static void __kprobes *patch_map(void *addr, int fixmap)
 106{
 107        unsigned long uintaddr = (uintptr_t) addr;
 108        bool module = !core_kernel_text(uintaddr);
 109        struct page *page;
 110
 111        if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
 112                page = vmalloc_to_page(addr);
 113        else if (!module)
 114                page = phys_to_page(__pa_symbol(addr));
 115        else
 116                return addr;
 117
 118        BUG_ON(!page);
 119        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
 120                        (uintaddr & ~PAGE_MASK));
 121}
 122
/* Tear down the temporary writable alias set up by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
 127/*
 128 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 129 * little-endian.
 130 */
 131int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
 132{
 133        int ret;
 134        __le32 val;
 135
 136        ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
 137        if (!ret)
 138                *insnp = le32_to_cpu(val);
 139
 140        return ret;
 141}
 142
/*
 * Write one little-endian instruction to kernel/module text through a
 * temporary fixmap alias, so write-protected text can be patched.
 */
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	/* Lock before mapping: FIX_TEXT_POKE0 is a single shared slot. */
	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
 159
/* Write a CPU-order instruction to text, converting to little-endian. */
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
 164
 165static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
 166{
 167        if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
 168                return false;
 169
 170        return  aarch64_insn_is_b(insn) ||
 171                aarch64_insn_is_bl(insn) ||
 172                aarch64_insn_is_svc(insn) ||
 173                aarch64_insn_is_hvc(insn) ||
 174                aarch64_insn_is_smc(insn) ||
 175                aarch64_insn_is_brk(insn) ||
 176                aarch64_insn_is_nop(insn);
 177}
 178
 179bool __kprobes aarch64_insn_uses_literal(u32 insn)
 180{
 181        /* ldr/ldrsw (literal), prfm */
 182
 183        return aarch64_insn_is_ldr_lit(insn) ||
 184                aarch64_insn_is_ldrsw_lit(insn) ||
 185                aarch64_insn_is_adr_adrp(insn) ||
 186                aarch64_insn_is_prfm_lit(insn);
 187}
 188
 189bool __kprobes aarch64_insn_is_branch(u32 insn)
 190{
 191        /* b, bl, cb*, tb*, b.cond, br, blr */
 192
 193        return aarch64_insn_is_b(insn) ||
 194                aarch64_insn_is_bl(insn) ||
 195                aarch64_insn_is_cbz(insn) ||
 196                aarch64_insn_is_cbnz(insn) ||
 197                aarch64_insn_is_tbz(insn) ||
 198                aarch64_insn_is_tbnz(insn) ||
 199                aarch64_insn_is_ret(insn) ||
 200                aarch64_insn_is_br(insn) ||
 201                aarch64_insn_is_blr(insn) ||
 202                aarch64_insn_is_bcond(insn);
 203}
 204
/*
 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 * Section B2.6.5 "Concurrent modification and execution of instructions":
 * Concurrent modification and execution of instructions can lead to the
 * resulting instruction performing any behavior that can be achieved by
 * executing any sequence of instructions that can be executed from the
 * same Exception level, except where the instruction before modification
 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 * or SMC instruction.
 */
bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
{
	/* Both the old and the new encoding must be in the safe set. */
	return __aarch64_insn_hotpatch_safe(old_insn) &&
	       __aarch64_insn_hotpatch_safe(new_insn);
}
 220
 221int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 222{
 223        u32 *tp = addr;
 224        int ret;
 225
 226        /* A64 instructions must be word aligned */
 227        if ((uintptr_t)tp & 0x3)
 228                return -EINVAL;
 229
 230        ret = aarch64_insn_write(tp, insn);
 231        if (ret == 0)
 232                __flush_icache_range((uintptr_t)tp,
 233                                     (uintptr_t)tp + AARCH64_INSN_SIZE);
 234
 235        return ret;
 236}
 237
/* Work description handed to aarch64_insn_patch_text_cb() via stop_machine(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* addresses to patch */
	u32		*new_insns;	/* replacement instructions */
	int		insn_cnt;	/* entries in the two arrays above */
	atomic_t	cpu_count;	/* rendezvous counter for the CPUs */
};
 244
/*
 * stop_machine() callback: the first CPU to arrive does all the patching
 * while the others spin, then everyone resynchronizes their instruction
 * stream with an ISB.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop patching on the first failure. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Wait until the count exceeds num_online_cpus(): every CPU
		 * has checked in and the master added its extra increment.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
 270
/*
 * Patch @cnt instructions under stop_machine() so no CPU can observe a
 * partially updated instruction stream.  Returns 0 or -EINVAL for an
 * empty batch.
 */
static
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
 287
 288int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
 289{
 290        int ret;
 291        u32 insn;
 292
 293        /* Unsafe to patch multiple instructions without synchronizaiton */
 294        if (cnt == 1) {
 295                ret = aarch64_insn_read(addrs[0], &insn);
 296                if (ret)
 297                        return ret;
 298
 299                if (aarch64_insn_hotpatch_safe(insn, insns[0]))
 300                        return aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
 301        }
 302
 303        return aarch64_insn_patch_text_sync(addrs, insns, cnt);
 304}
 305
/*
 * Look up the width mask (@maskp) and bit position (@shiftp) of the
 * contiguous immediate field @type.  Returns 0 on success or -EINVAL
 * for an unknown type.  (The split ADR immediate is handled separately
 * by the encode/decode functions below.)
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
 363
/* ADR/ADRP split their 21-bit immediate: immlo at [30:29], immhi at [23:5]. */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

/*
 * Extract the immediate field of type @type from @insn.  The split
 * ADR immediate is recombined here; all other types use the single
 * contiguous field described by aarch64_get_imm_shift_mask().
 * Returns 0 for an unknown immediate type.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		/* Reassemble the 21-bit value as immhi:immlo. */
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
 394
/*
 * Insert @imm into the immediate field of type @type in @insn.
 * Returns AARCH64_BREAK_FAULT on any error, and passes an incoming
 * AARCH64_BREAK_FAULT straight through so failures propagate across
 * chained encode calls.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		/* Split @imm into the immlo [30:29] / immhi [23:5] fields. */
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
 428
 429u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
 430                                        u32 insn)
 431{
 432        int shift;
 433
 434        switch (type) {
 435        case AARCH64_INSN_REGTYPE_RT:
 436        case AARCH64_INSN_REGTYPE_RD:
 437                shift = 0;
 438                break;
 439        case AARCH64_INSN_REGTYPE_RN:
 440                shift = 5;
 441                break;
 442        case AARCH64_INSN_REGTYPE_RT2:
 443        case AARCH64_INSN_REGTYPE_RA:
 444                shift = 10;
 445                break;
 446        case AARCH64_INSN_REGTYPE_RM:
 447                shift = 16;
 448                break;
 449        default:
 450                pr_err("%s: unknown register type encoding %d\n", __func__,
 451                       type);
 452                return 0;
 453        }
 454
 455        return (insn >> shift) & GENMASK(4, 0);
 456}
 457
 458static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
 459                                        u32 insn,
 460                                        enum aarch64_insn_register reg)
 461{
 462        int shift;
 463
 464        if (insn == AARCH64_BREAK_FAULT)
 465                return AARCH64_BREAK_FAULT;
 466
 467        if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
 468                pr_err("%s: unknown register encoding %d\n", __func__, reg);
 469                return AARCH64_BREAK_FAULT;
 470        }
 471
 472        switch (type) {
 473        case AARCH64_INSN_REGTYPE_RT:
 474        case AARCH64_INSN_REGTYPE_RD:
 475                shift = 0;
 476                break;
 477        case AARCH64_INSN_REGTYPE_RN:
 478                shift = 5;
 479                break;
 480        case AARCH64_INSN_REGTYPE_RT2:
 481        case AARCH64_INSN_REGTYPE_RA:
 482                shift = 10;
 483                break;
 484        case AARCH64_INSN_REGTYPE_RM:
 485        case AARCH64_INSN_REGTYPE_RS:
 486                shift = 16;
 487                break;
 488        default:
 489                pr_err("%s: unknown register type encoding %d\n", __func__,
 490                       type);
 491                return AARCH64_BREAK_FAULT;
 492        }
 493
 494        insn &= ~(GENMASK(4, 0) << shift);
 495        insn |= reg << shift;
 496
 497        return insn;
 498}
 499
 500static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
 501                                         u32 insn)
 502{
 503        u32 size;
 504
 505        switch (type) {
 506        case AARCH64_INSN_SIZE_8:
 507                size = 0;
 508                break;
 509        case AARCH64_INSN_SIZE_16:
 510                size = 1;
 511                break;
 512        case AARCH64_INSN_SIZE_32:
 513                size = 2;
 514                break;
 515        case AARCH64_INSN_SIZE_64:
 516                size = 3;
 517                break;
 518        default:
 519                pr_err("%s: unknown size encoding %d\n", __func__, type);
 520                return AARCH64_BREAK_FAULT;
 521        }
 522
 523        insn &= ~GENMASK(31, 30);
 524        insn |= size << 30;
 525
 526        return insn;
 527}
 528
/*
 * Compute the byte offset from @pc to @addr for an immediate branch.
 *
 * On error (misaligned pc/addr, or offset outside [-range, range)) the
 * value @range itself is returned as a sentinel: every caller must check
 * for offset >= range before encoding the result.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/* A64 instructions are always 4-byte aligned. */
	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
 548
 549u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
 550                                          enum aarch64_insn_branch_type type)
 551{
 552        u32 insn;
 553        long offset;
 554
 555        /*
 556         * B/BL support [-128M, 128M) offset
 557         * ARM64 virtual address arrangement guarantees all kernel and module
 558         * texts are within +/-128M.
 559         */
 560        offset = branch_imm_common(pc, addr, SZ_128M);
 561        if (offset >= SZ_128M)
 562                return AARCH64_BREAK_FAULT;
 563
 564        switch (type) {
 565        case AARCH64_INSN_BRANCH_LINK:
 566                insn = aarch64_insn_get_bl_value();
 567                break;
 568        case AARCH64_INSN_BRANCH_NOLINK:
 569                insn = aarch64_insn_get_b_value();
 570                break;
 571        default:
 572                pr_err("%s: unknown branch encoding %d\n", __func__, type);
 573                return AARCH64_BREAK_FAULT;
 574        }
 575
 576        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
 577                                             offset >> 2);
 578}
 579
 580u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
 581                                     enum aarch64_insn_register reg,
 582                                     enum aarch64_insn_variant variant,
 583                                     enum aarch64_insn_branch_type type)
 584{
 585        u32 insn;
 586        long offset;
 587
 588        offset = branch_imm_common(pc, addr, SZ_1M);
 589        if (offset >= SZ_1M)
 590                return AARCH64_BREAK_FAULT;
 591
 592        switch (type) {
 593        case AARCH64_INSN_BRANCH_COMP_ZERO:
 594                insn = aarch64_insn_get_cbz_value();
 595                break;
 596        case AARCH64_INSN_BRANCH_COMP_NONZERO:
 597                insn = aarch64_insn_get_cbnz_value();
 598                break;
 599        default:
 600                pr_err("%s: unknown branch encoding %d\n", __func__, type);
 601                return AARCH64_BREAK_FAULT;
 602        }
 603
 604        switch (variant) {
 605        case AARCH64_INSN_VARIANT_32BIT:
 606                break;
 607        case AARCH64_INSN_VARIANT_64BIT:
 608                insn |= AARCH64_INSN_SF_BIT;
 609                break;
 610        default:
 611                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
 612                return AARCH64_BREAK_FAULT;
 613        }
 614
 615        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
 616
 617        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
 618                                             offset >> 2);
 619}
 620
 621u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
 622                                     enum aarch64_insn_condition cond)
 623{
 624        u32 insn;
 625        long offset;
 626
 627        offset = branch_imm_common(pc, addr, SZ_1M);
 628
 629        insn = aarch64_insn_get_bcond_value();
 630
 631        if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
 632                pr_err("%s: unknown condition encoding %d\n", __func__, cond);
 633                return AARCH64_BREAK_FAULT;
 634        }
 635        insn |= cond;
 636
 637        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
 638                                             offset >> 2);
 639}
 640
/* Build a HINT instruction; @op is already shifted into its field. */
u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}
 645
/* NOP is encoded as the HINT instruction with the NOP operand. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
 650
 651u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
 652                                enum aarch64_insn_branch_type type)
 653{
 654        u32 insn;
 655
 656        switch (type) {
 657        case AARCH64_INSN_BRANCH_NOLINK:
 658                insn = aarch64_insn_get_br_value();
 659                break;
 660        case AARCH64_INSN_BRANCH_LINK:
 661                insn = aarch64_insn_get_blr_value();
 662                break;
 663        case AARCH64_INSN_BRANCH_RETURN:
 664                insn = aarch64_insn_get_ret_value();
 665                break;
 666        default:
 667                pr_err("%s: unknown branch encoding %d\n", __func__, type);
 668                return AARCH64_BREAK_FAULT;
 669        }
 670
 671        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
 672}
 673
 674u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
 675                                    enum aarch64_insn_register base,
 676                                    enum aarch64_insn_register offset,
 677                                    enum aarch64_insn_size_type size,
 678                                    enum aarch64_insn_ldst_type type)
 679{
 680        u32 insn;
 681
 682        switch (type) {
 683        case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
 684                insn = aarch64_insn_get_ldr_reg_value();
 685                break;
 686        case AARCH64_INSN_LDST_STORE_REG_OFFSET:
 687                insn = aarch64_insn_get_str_reg_value();
 688                break;
 689        default:
 690                pr_err("%s: unknown load/store encoding %d\n", __func__, type);
 691                return AARCH64_BREAK_FAULT;
 692        }
 693
 694        insn = aarch64_insn_encode_ldst_size(size, insn);
 695
 696        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
 697
 698        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
 699                                            base);
 700
 701        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
 702                                            offset);
 703}
 704
/*
 * Generate an ldp/stp with pre- or post-index writeback.  @offset is in
 * bytes and must be a multiple of the register size, within the signed
 * 7-bit scaled-immediate range.  Returns AARCH64_BREAK_FAULT on any
 * invalid argument.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* imm7 is scaled by the register size: 4 bytes for W, 8 for X. */
	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
 768
/*
 * Generate a load-exclusive or store-exclusive instruction.
 * @state goes into the Rs field (the store-exclusive status register);
 * Rt2 is forced to XZR, which this encoding requires for the
 * single-register forms.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
 803
/*
 * Generate an LDADD: atomically add @value to [@address], returning the
 * old memory value in @result.  Only 32-bit and 64-bit accesses are
 * implemented here.
 */
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn = aarch64_insn_get_ldadd_value();

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
 831
/*
 * Generate a STADD: atomically add @value to [@address], discarding the
 * old memory value.
 */
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}
 843
/*
 * Pack the prefetch operation (type, target cache level, policy) into
 * the Rt field of a PRFM instruction, laid out as
 * type[4:3] : target[2:1] : policy[0].
 */
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
 896
 897u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 898                              enum aarch64_insn_prfm_type type,
 899                              enum aarch64_insn_prfm_target target,
 900                              enum aarch64_insn_prfm_policy policy)
 901{
 902        u32 insn = aarch64_insn_get_prfm_value();
 903
 904        insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
 905
 906        insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
 907
 908        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
 909                                            base);
 910
 911        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
 912}
 913
/*
 * Generate an ADD/SUB (immediate) instruction, optionally setting the
 * condition flags.
 *
 * The immediate must be either a plain 12-bit value or a 12-bit value
 * shifted left by 12 (encoded with the LSL #12 form); anything else
 * cannot fit a single instruction.
 *
 * Returns the encoded instruction or AARCH64_BREAK_FAULT on error.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		/* Shift down and flag the LSL #12 form. */
		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
 974
/*
 * Generate a bitfield-move instruction (BFM/UBFM/SBFM).
 *
 * @immr and @imms are the raw rotate/width fields; they must fit in
 * 5 bits for the 32-bit variant or 6 bits for the 64-bit variant.
 *
 * Returns the encoded instruction or AARCH64_BREAK_FAULT on error.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* The 64-bit form needs both SF and N set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
1029
/*
 * Generate a MOVZ/MOVK/MOVN instruction.
 *
 * @imm is the raw 16-bit payload; @shift is the LSL amount and must be
 * a multiple of 16 within the operand width (0/16 for 32-bit,
 * 0/16/32/48 for 64-bit).
 *
 * Returns the encoded instruction or AARCH64_BREAK_FAULT on error.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* Only a raw 16-bit payload fits the imm16 field. */
	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* hw field (bits [22:21]) selects the 16-bit lane: shift / 16. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
1084
1085u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
1086                                         enum aarch64_insn_register src,
1087                                         enum aarch64_insn_register reg,
1088                                         int shift,
1089                                         enum aarch64_insn_variant variant,
1090                                         enum aarch64_insn_adsb_type type)
1091{
1092        u32 insn;
1093
1094        switch (type) {
1095        case AARCH64_INSN_ADSB_ADD:
1096                insn = aarch64_insn_get_add_value();
1097                break;
1098        case AARCH64_INSN_ADSB_SUB:
1099                insn = aarch64_insn_get_sub_value();
1100                break;
1101        case AARCH64_INSN_ADSB_ADD_SETFLAGS:
1102                insn = aarch64_insn_get_adds_value();
1103                break;
1104        case AARCH64_INSN_ADSB_SUB_SETFLAGS:
1105                insn = aarch64_insn_get_subs_value();
1106                break;
1107        default:
1108                pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
1109                return AARCH64_BREAK_FAULT;
1110        }
1111
1112        switch (variant) {
1113        case AARCH64_INSN_VARIANT_32BIT:
1114                if (shift & ~(SZ_32 - 1)) {
1115                        pr_err("%s: invalid shift encoding %d\n", __func__,
1116                               shift);
1117                        return AARCH64_BREAK_FAULT;
1118                }
1119                break;
1120        case AARCH64_INSN_VARIANT_64BIT:
1121                insn |= AARCH64_INSN_SF_BIT;
1122                if (shift & ~(SZ_64 - 1)) {
1123                        pr_err("%s: invalid shift encoding %d\n", __func__,
1124                               shift);
1125                        return AARCH64_BREAK_FAULT;
1126                }
1127                break;
1128        default:
1129                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1130                return AARCH64_BREAK_FAULT;
1131        }
1132
1133
1134        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1135
1136        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1137
1138        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1139
1140        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1141}
1142
/*
 * Generate a one-source data-processing instruction (REV16/REV32/REV64).
 *
 * REV64 only exists for the 64-bit variant and is rejected otherwise.
 *
 * Returns the encoded instruction or AARCH64_BREAK_FAULT on error.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
1185
1186u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1187                           enum aarch64_insn_register src,
1188                           enum aarch64_insn_register reg,
1189                           enum aarch64_insn_variant variant,
1190                           enum aarch64_insn_data2_type type)
1191{
1192        u32 insn;
1193
1194        switch (type) {
1195        case AARCH64_INSN_DATA2_UDIV:
1196                insn = aarch64_insn_get_udiv_value();
1197                break;
1198        case AARCH64_INSN_DATA2_SDIV:
1199                insn = aarch64_insn_get_sdiv_value();
1200                break;
1201        case AARCH64_INSN_DATA2_LSLV:
1202                insn = aarch64_insn_get_lslv_value();
1203                break;
1204        case AARCH64_INSN_DATA2_LSRV:
1205                insn = aarch64_insn_get_lsrv_value();
1206                break;
1207        case AARCH64_INSN_DATA2_ASRV:
1208                insn = aarch64_insn_get_asrv_value();
1209                break;
1210        case AARCH64_INSN_DATA2_RORV:
1211                insn = aarch64_insn_get_rorv_value();
1212                break;
1213        default:
1214                pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1215                return AARCH64_BREAK_FAULT;
1216        }
1217
1218        switch (variant) {
1219        case AARCH64_INSN_VARIANT_32BIT:
1220                break;
1221        case AARCH64_INSN_VARIANT_64BIT:
1222                insn |= AARCH64_INSN_SF_BIT;
1223                break;
1224        default:
1225                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1226                return AARCH64_BREAK_FAULT;
1227        }
1228
1229        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1230
1231        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1232
1233        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1234}
1235
/*
 * Generate a three-source data-processing instruction (MADD/MSUB):
 * dst = src +/- (reg1 * reg2), with @src going into the Ra field.
 *
 * Returns the encoded instruction or AARCH64_BREAK_FAULT on error.
 */
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
1278
/*
 * Generate a logical (shifted register) instruction: AND/BIC/ORR/ORN/
 * EOR/EON/ANDS/BICS, with @shift encoded into the imm6 field.
 *
 * @shift must be smaller than the operand width (32 or 64).
 *
 * Returns the encoded instruction or AARCH64_BREAK_FAULT on error.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1348
1349/*
1350 * Decode the imm field of a branch, and return the byte offset as a
1351 * signed value (so it can be used when computing a new branch
1352 * target).
1353 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/*
		 * The field holds offset/4. Shift left so the field's
		 * sign bit lands on bit 31, then arithmetic-shift back
		 * down; the net << 2 sign-extends and scales to bytes.
		 */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* 19-bit field, same sign-extend-and-scale trick. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* 14-bit field, same sign-extend-and-scale trick. */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1377
1378/*
1379 * Encode the displacement of a branch in the imm field and return the
1380 * updated instruction.
1381 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	/*
	 * @offset is in bytes; all branch immediates store offset/4.
	 * Callers are expected to pass an offset in range for the
	 * particular branch form.
	 */
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
1400
/* Return the byte offset encoded in an ADRP (a signed page count << 12). */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1406
/* Re-encode an ADRP's immediate; only the page number of @offset is kept. */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}
1413
1414/*
1415 * Extract the Op/CR data from a msr/mrs instruction.
1416 */
1417u32 aarch64_insn_extract_system_reg(u32 insn)
1418{
1419        return (insn & 0x1FFFE0) >> 5;
1420}
1421
/*
 * A Thumb-2 instruction is 32 bits ("wide") when its first halfword is
 * 0xe800 or above; anything below that is a 16-bit instruction.
 */
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
1426
1427/*
1428 * Macros/defines for extracting register numbers from instruction.
1429 */
1430u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1431{
1432        return (insn & (0xf << offset)) >> offset;
1433}
1434
1435#define OPC2_MASK       0x7
1436#define OPC2_OFFSET     5
1437u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1438{
1439        return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1440}
1441
1442#define CRM_MASK        0xf
/* CRm is the low nibble (bits [3:0]) of an MCR instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
1447
/* Condition "eq": Z set (equal). */
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}
1452
/* Condition "ne": Z clear (not equal). */
static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}
1457
/* Condition "cs"/"hs": C set (carry / unsigned higher-or-same). */
static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}
1462
/* Condition "cc"/"lo": C clear (no carry / unsigned lower). */
static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}
1467
/* Condition "mi": N set (negative). */
static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}
1472
/* Condition "pl": N clear (positive or zero). */
static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}
1477
/* Condition "vs": V set (overflow). */
static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}
1482
/* Condition "vc": V clear (no overflow). */
static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}
1487
/* Condition "hi": C set and Z clear (unsigned higher). */
static bool __kprobes __check_hi(unsigned long pstate)
{
	/* Z sits one bit above C, so this clears C whenever Z is set. */
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}
1493
/* Condition "ls": C clear or Z set (unsigned lower or same). */
static bool __kprobes __check_ls(unsigned long pstate)
{
	/* Z sits one bit above C, so this clears C whenever Z is set. */
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}
1499
/* Condition "ge": N == V (signed greater than or equal). */
static bool __kprobes __check_ge(unsigned long pstate)
{
	/* V sits three bits below N; XOR folds V into the N position. */
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}
1505
/* Condition "lt": N != V (signed less than). */
static bool __kprobes __check_lt(unsigned long pstate)
{
	/* V sits three bits below N; XOR folds V into the N position. */
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}
1511
/* Condition "gt": Z clear and N == V (signed greater than). */
static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	/* Fold Z in as well: gt fails if Z is set. */
	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}
1520
/* Condition "le": Z set or N != V (signed less than or equal). */
static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	/* Fold Z in as well: le succeeds if Z is set. */
	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}
1529
/* Condition "al": always passes (unconditional). */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1534
1535/*
1536 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
1537 * it behaves identically to 0b1110 ("al").
1538 */
/* Indexed by the 4-bit AArch32 condition code. */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,	/* 0b0000-0b0011 */
	__check_mi, __check_pl, __check_vs, __check_vc,	/* 0b0100-0b0111 */
	__check_hi, __check_ls, __check_ge, __check_lt,	/* 0b1000-0b1011 */
	__check_gt, __check_le, __check_al, __check_al	/* 0b1100-0b1111 */
};
1545
1546static bool range_of_ones(u64 val)
1547{
1548        /* Doesn't handle full ones or full zeroes */
1549        u64 sval = val >> __ffs64(val);
1550
1551        /* One of Sean Eron Anderson's bithack tricks */
1552        return ((sval + 1) & (sval)) == 0;
1553}
1554
1555static u32 aarch64_encode_immediate(u64 imm,
1556                                    enum aarch64_insn_variant variant,
1557                                    u32 insn)
1558{
1559        unsigned int immr, imms, n, ones, ror, esz, tmp;
1560        u64 mask;
1561
1562        switch (variant) {
1563        case AARCH64_INSN_VARIANT_32BIT:
1564                esz = 32;
1565                break;
1566        case AARCH64_INSN_VARIANT_64BIT:
1567                insn |= AARCH64_INSN_SF_BIT;
1568                esz = 64;
1569                break;
1570        default:
1571                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1572                return AARCH64_BREAK_FAULT;
1573        }
1574
1575        mask = GENMASK(esz - 1, 0);
1576
1577        /* Can't encode full zeroes, full ones, or value wider than the mask */
1578        if (!imm || imm == mask || imm & ~mask)
1579                return AARCH64_BREAK_FAULT;
1580
1581        /*
1582         * Inverse of Replicate(). Try to spot a repeating pattern
1583         * with a pow2 stride.
1584         */
1585        for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1586                u64 emask = BIT(tmp) - 1;
1587
1588                if ((imm & emask) != ((imm >> tmp) & emask))
1589                        break;
1590
1591                esz = tmp;
1592                mask = emask;
1593        }
1594
1595        /* N is only set if we're encoding a 64bit value */
1596        n = esz == 64;
1597
1598        /* Trim imm to the element size */
1599        imm &= mask;
1600
1601        /* That's how many ones we need to encode */
1602        ones = hweight64(imm);
1603
1604        /*
1605         * imms is set to (ones - 1), prefixed with a string of ones
1606         * and a zero if they fit. Cap it to 6 bits.
1607         */
1608        imms  = ones - 1;
1609        imms |= 0xf << ffs(esz);
1610        imms &= BIT(6) - 1;
1611
1612        /* Compute the rotation */
1613        if (range_of_ones(imm)) {
1614                /*
1615                 * Pattern: 0..01..10..0
1616                 *
1617                 * Compute how many rotate we need to align it right
1618                 */
1619                ror = __ffs64(imm);
1620        } else {
1621                /*
1622                 * Pattern: 0..01..10..01..1
1623                 *
1624                 * Fill the unused top bits with ones, and check if
1625                 * the result is a valid immediate (all ones with a
1626                 * contiguous ranges of zeroes).
1627                 */
1628                imm |= ~mask;
1629                if (!range_of_ones(~imm))
1630                        return AARCH64_BREAK_FAULT;
1631
1632                /*
1633                 * Compute the rotation to get a continuous set of
1634                 * ones, with the first bit set at position 0
1635                 */
1636                ror = fls(~imm);
1637        }
1638
1639        /*
1640         * immr is the number of bits we need to rotate back to the
1641         * original set of ones. Note that this is relative to the
1642         * element size...
1643         */
1644        immr = (esz - ror) % esz;
1645
1646        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1647        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1648        return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1649}
1650
1651u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1652                                       enum aarch64_insn_variant variant,
1653                                       enum aarch64_insn_register Rn,
1654                                       enum aarch64_insn_register Rd,
1655                                       u64 imm)
1656{
1657        u32 insn;
1658
1659        switch (type) {
1660        case AARCH64_INSN_LOGIC_AND:
1661                insn = aarch64_insn_get_and_imm_value();
1662                break;
1663        case AARCH64_INSN_LOGIC_ORR:
1664                insn = aarch64_insn_get_orr_imm_value();
1665                break;
1666        case AARCH64_INSN_LOGIC_EOR:
1667                insn = aarch64_insn_get_eor_imm_value();
1668                break;
1669        case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1670                insn = aarch64_insn_get_ands_imm_value();
1671                break;
1672        default:
1673                pr_err("%s: unknown logical encoding %d\n", __func__, type);
1674                return AARCH64_BREAK_FAULT;
1675        }
1676
1677        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1678        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1679        return aarch64_encode_immediate(imm, variant, insn);
1680}
1681
1682u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1683                          enum aarch64_insn_register Rm,
1684                          enum aarch64_insn_register Rn,
1685                          enum aarch64_insn_register Rd,
1686                          u8 lsb)
1687{
1688        u32 insn;
1689
1690        insn = aarch64_insn_get_extr_value();
1691
1692        switch (variant) {
1693        case AARCH64_INSN_VARIANT_32BIT:
1694                if (lsb > 31)
1695                        return AARCH64_BREAK_FAULT;
1696                break;
1697        case AARCH64_INSN_VARIANT_64BIT:
1698                if (lsb > 63)
1699                        return AARCH64_BREAK_FAULT;
1700                insn |= AARCH64_INSN_SF_BIT;
1701                insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1702                break;
1703        default:
1704                pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1705                return AARCH64_BREAK_FAULT;
1706        }
1707
1708        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1709        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1710        insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1711        return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1712}
1713