linux/arch/s390/net/bpf_jit_comp.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
  10 *  - PACK_STACK
  11 *  - 64BIT
  12 *
  13 * Copyright IBM Corp. 2012,2015
  14 *
  15 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  16 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
  17 */
  18
  19#define KMSG_COMPONENT "bpf_jit"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/netdevice.h>
  23#include <linux/filter.h>
  24#include <linux/init.h>
  25#include <linux/bpf.h>
  26#include <linux/mm.h>
  27#include <linux/kernel.h>
  28#include <asm/cacheflush.h>
  29#include <asm/dis.h>
  30#include <asm/facility.h>
  31#include <asm/nospec-branch.h>
  32#include <asm/set_memory.h>
  33#include "bpf_jit.h"
  34
  35struct bpf_jit {
  36        u32 seen;               /* Flags to remember seen eBPF instructions */
  37        u32 seen_reg[16];       /* Array to remember which registers are used */
  38        u32 *addrs;             /* Array with relative instruction addresses */
  39        u8 *prg_buf;            /* Start of program */
  40        int size;               /* Size of program and literal pool */
  41        int size_prg;           /* Size of program */
  42        int prg;                /* Current position in program */
  43        int lit32_start;        /* Start of 32-bit literal pool */
  44        int lit32;              /* Current position in 32-bit literal pool */
  45        int lit64_start;        /* Start of 64-bit literal pool */
  46        int lit64;              /* Current position in 64-bit literal pool */
  47        int base_ip;            /* Base address for literal pool */
  48        int exit_ip;            /* Address of exit */
  49        int r1_thunk_ip;        /* Address of expoline thunk for 'br %r1' */
  50        int r14_thunk_ip;       /* Address of expoline thunk for 'br %r14' */
  51        int tail_call_start;    /* Tail call start offset */
  52        int excnt;              /* Number of exception table entries */
  53};
  54
  55#define SEEN_MEM        BIT(0)          /* use mem[] for temporary storage */
  56#define SEEN_LITERAL    BIT(1)          /* code uses literals */
  57#define SEEN_FUNC       BIT(2)          /* calls C functions */
  58#define SEEN_TAIL_CALL  BIT(3)          /* code uses tail calls */
  59#define SEEN_STACK      (SEEN_FUNC | SEEN_MEM)
  60
  61/*
  62 * s390 registers
  63 */
  64#define REG_W0          (MAX_BPF_JIT_REG + 0)   /* Work register 1 (even) */
  65#define REG_W1          (MAX_BPF_JIT_REG + 1)   /* Work register 2 (odd) */
  66#define REG_L           (MAX_BPF_JIT_REG + 2)   /* Literal pool register */
  67#define REG_15          (MAX_BPF_JIT_REG + 3)   /* Register 15 */
  68#define REG_0           REG_W0                  /* Register 0 */
  69#define REG_1           REG_W1                  /* Register 1 */
  70#define REG_2           BPF_REG_1               /* Register 2 */
  71#define REG_14          BPF_REG_0               /* Register 14 */
  72
  73/*
  74 * Mapping of BPF registers to s390 registers
  75 */
  76static const int reg2hex[] = {
  77        /* Return code */
  78        [BPF_REG_0]     = 14,
  79        /* Function parameters */
  80        [BPF_REG_1]     = 2,
  81        [BPF_REG_2]     = 3,
  82        [BPF_REG_3]     = 4,
  83        [BPF_REG_4]     = 5,
  84        [BPF_REG_5]     = 6,
  85        /* Call saved registers */
  86        [BPF_REG_6]     = 7,
  87        [BPF_REG_7]     = 8,
  88        [BPF_REG_8]     = 9,
  89        [BPF_REG_9]     = 10,
  90        /* BPF stack pointer */
  91        [BPF_REG_FP]    = 13,
  92        /* Register for blinding */
  93        [BPF_REG_AX]    = 12,
  94        /* Work registers for s390x backend */
  95        [REG_W0]        = 0,
  96        [REG_W1]        = 1,
  97        [REG_L]         = 11,
  98        [REG_15]        = 15,
  99};
 100
 101static inline u32 reg(u32 dst_reg, u32 src_reg)
 102{
 103        return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 104}
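     /*
      * Example: reg(BPF_REG_1, BPF_REG_2) yields 0x23 (%r2 and %r3 per
      * reg2hex above), so EMIT4(0xb9040000, BPF_REG_1, BPF_REG_2) assembles
      * to 0xb9040023, i.e. "lgr %r2,%r3".
      */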
 105
 106static inline u32 reg_high(u32 reg)
 107{
 108        return reg2hex[reg] << 4;
 109}
 110
 111static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 112{
 113        u32 r1 = reg2hex[b1];
 114
 115        if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
 116                jit->seen_reg[r1] = 1;
 117}
 118
 119#define REG_SET_SEEN(b1)                                        \
 120({                                                              \
 121        reg_set_seen(jit, b1);                                  \
 122})
 123
 124#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 125
 126/*
 127 * EMIT macros for code generation
 128 */
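     /*
      * The _EMITn() forms emit a raw n-byte instruction; the EMITn() wrappers
      * additionally fold register nibbles into the opcode via reg()/reg_high()
      * and record register usage with REG_SET_SEEN(). While jit->prg_buf is
      * NULL (the sizing passes) nothing is written and only jit->prg advances;
      * the bytes are stored once the final image buffer is allocated.
      */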
 129
 130#define _EMIT2(op)                                              \
 131({                                                              \
 132        if (jit->prg_buf)                                       \
 133                *(u16 *) (jit->prg_buf + jit->prg) = (op);      \
 134        jit->prg += 2;                                          \
 135})
 136
 137#define EMIT2(op, b1, b2)                                       \
 138({                                                              \
 139        _EMIT2((op) | reg(b1, b2));                             \
 140        REG_SET_SEEN(b1);                                       \
 141        REG_SET_SEEN(b2);                                       \
 142})
 143
 144#define _EMIT4(op)                                              \
 145({                                                              \
 146        if (jit->prg_buf)                                       \
 147                *(u32 *) (jit->prg_buf + jit->prg) = (op);      \
 148        jit->prg += 4;                                          \
 149})
 150
 151#define EMIT4(op, b1, b2)                                       \
 152({                                                              \
 153        _EMIT4((op) | reg(b1, b2));                             \
 154        REG_SET_SEEN(b1);                                       \
 155        REG_SET_SEEN(b2);                                       \
 156})
 157
 158#define EMIT4_RRF(op, b1, b2, b3)                               \
 159({                                                              \
 160        _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2));         \
 161        REG_SET_SEEN(b1);                                       \
 162        REG_SET_SEEN(b2);                                       \
 163        REG_SET_SEEN(b3);                                       \
 164})
 165
 166#define _EMIT4_DISP(op, disp)                                   \
 167({                                                              \
 168        unsigned int __disp = (disp) & 0xfff;                   \
 169        _EMIT4((op) | __disp);                                  \
 170})
 171
 172#define EMIT4_DISP(op, b1, b2, disp)                            \
 173({                                                              \
 174        _EMIT4_DISP((op) | reg_high(b1) << 16 |                 \
 175                    reg_high(b2) << 8, (disp));                 \
 176        REG_SET_SEEN(b1);                                       \
 177        REG_SET_SEEN(b2);                                       \
 178})
 179
 180#define EMIT4_IMM(op, b1, imm)                                  \
 181({                                                              \
 182        unsigned int __imm = (imm) & 0xffff;                    \
 183        _EMIT4((op) | reg_high(b1) << 16 | __imm);              \
 184        REG_SET_SEEN(b1);                                       \
 185})
 186
 187#define EMIT4_PCREL(op, pcrel)                                  \
 188({                                                              \
 189        long __pcrel = ((pcrel) >> 1) & 0xffff;                 \
 190        _EMIT4((op) | __pcrel);                                 \
 191})
 192
 193#define EMIT4_PCREL_RIC(op, mask, target)                       \
 194({                                                              \
 195        int __rel = ((target) - jit->prg) / 2;                  \
 196        _EMIT4((op) | (mask) << 20 | (__rel & 0xffff));         \
 197})
 198
 199#define _EMIT6(op1, op2)                                        \
 200({                                                              \
 201        if (jit->prg_buf) {                                     \
 202                *(u32 *) (jit->prg_buf + jit->prg) = (op1);     \
 203                *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
 204        }                                                       \
 205        jit->prg += 6;                                          \
 206})
 207
 208#define _EMIT6_DISP(op1, op2, disp)                             \
 209({                                                              \
 210        unsigned int __disp = (disp) & 0xfff;                   \
 211        _EMIT6((op1) | __disp, op2);                            \
 212})
 213
 214#define _EMIT6_DISP_LH(op1, op2, disp)                          \
 215({                                                              \
 216        u32 _disp = (u32) (disp);                               \
 217        unsigned int __disp_h = _disp & 0xff000;                \
 218        unsigned int __disp_l = _disp & 0x00fff;                \
 219        _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4);        \
 220})
 221
 222#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)               \
 223({                                                              \
 224        _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 |              \
 225                       reg_high(b3) << 8, op2, disp);           \
 226        REG_SET_SEEN(b1);                                       \
 227        REG_SET_SEEN(b2);                                       \
 228        REG_SET_SEEN(b3);                                       \
 229})
 230
 231#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)        \
 232({                                                              \
 233        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 234        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),      \
 235               (op2) | (mask) << 12);                           \
 236        REG_SET_SEEN(b1);                                       \
 237        REG_SET_SEEN(b2);                                       \
 238})
 239
 240#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)       \
 241({                                                              \
 242        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 243        _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |          \
 244                (rel & 0xffff), (op2) | ((imm) & 0xff) << 8);   \
 245        REG_SET_SEEN(b1);                                       \
 246        BUILD_BUG_ON(((unsigned long) (imm)) > 0xff);           \
 247})
 248
 249#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)             \
 250({                                                              \
 251        /* Branch instruction needs 6 bytes */                  \
 252        int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
 253        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
 254        REG_SET_SEEN(b1);                                       \
 255        REG_SET_SEEN(b2);                                       \
 256})
 257
 258#define EMIT6_PCREL_RILB(op, b, target)                         \
 259({                                                              \
 260        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 261        _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
 262        REG_SET_SEEN(b);                                        \
 263})
 264
 265#define EMIT6_PCREL_RIL(op, target)                             \
 266({                                                              \
 267        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 268        _EMIT6((op) | rel >> 16, rel & 0xffff);                 \
 269})
 270
 271#define EMIT6_PCREL_RILC(op, mask, target)                      \
 272({                                                              \
 273        EMIT6_PCREL_RIL((op) | (mask) << 20, (target));         \
 274})
 275
 276#define _EMIT6_IMM(op, imm)                                     \
 277({                                                              \
 278        unsigned int __imm = (imm);                             \
 279        _EMIT6((op) | (__imm >> 16), __imm & 0xffff);           \
 280})
 281
 282#define EMIT6_IMM(op, b1, imm)                                  \
 283({                                                              \
 284        _EMIT6_IMM((op) | reg_high(b1) << 16, imm);             \
 285        REG_SET_SEEN(b1);                                       \
 286})
 287
 288#define _EMIT_CONST_U32(val)                                    \
 289({                                                              \
 290        unsigned int ret;                                       \
 291        ret = jit->lit32;                                       \
 292        if (jit->prg_buf)                                       \
 293                *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
 294        jit->lit32 += 4;                                        \
 295        ret;                                                    \
 296})
 297
 298#define EMIT_CONST_U32(val)                                     \
 299({                                                              \
 300        jit->seen |= SEEN_LITERAL;                              \
 301        _EMIT_CONST_U32(val) - jit->base_ip;                    \
 302})
 303
 304#define _EMIT_CONST_U64(val)                                    \
 305({                                                              \
 306        unsigned int ret;                                       \
 307        ret = jit->lit64;                                       \
 308        if (jit->prg_buf)                                       \
 309                *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
 310        jit->lit64 += 8;                                        \
 311        ret;                                                    \
 312})
 313
 314#define EMIT_CONST_U64(val)                                     \
 315({                                                              \
 316        jit->seen |= SEEN_LITERAL;                              \
 317        _EMIT_CONST_U64(val) - jit->base_ip;                    \
 318})
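     /*
      * _EMIT_CONST_U32()/_EMIT_CONST_U64() append a value to the 32-bit or
      * 64-bit literal pool and return its offset within the JITed image,
      * usable as a target for PC-relative loads such as lgrl. The
      * EMIT_CONST_*() wrappers return the offset relative to jit->base_ip
      * instead, for use as a displacement off the literal pool register %l.
      */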
 319
 320#define EMIT_ZERO(b1)                                           \
 321({                                                              \
 322        if (!fp->aux->verifier_zext) {                          \
 323                /* llgfr %dst,%dst (zero extend to 64 bit) */   \
 324                EMIT4(0xb9160000, b1, b1);                      \
 325                REG_SET_SEEN(b1);                               \
 326        }                                                       \
 327})
 328
 329/*
 330 * Return whether this is the first pass. The first pass is special, since we
 331 * don't know any sizes yet, and thus must be conservative.
 332 */
 333static bool is_first_pass(struct bpf_jit *jit)
 334{
 335        return jit->size == 0;
 336}
 337
 338/*
 339 * Return whether this is the code generation pass. The code generation pass is
 340 * special, since we should change as little as possible.
 341 */
 342static bool is_codegen_pass(struct bpf_jit *jit)
 343{
 344        return jit->prg_buf;
 345}
 346
 347/*
 348 * Return whether "rel" can be encoded as a short PC-relative offset
 349 */
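     /*
      * Short relative branches encode the offset as a signed 16-bit number of
      * halfwords, hence the reachable byte range of -65536..65534 below.
      */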
 350static bool is_valid_rel(int rel)
 351{
 352        return rel >= -65536 && rel <= 65534;
 353}
 354
 355/*
 356 * Return whether "off" can be reached using a short PC-relative offset
 357 */
 358static bool can_use_rel(struct bpf_jit *jit, int off)
 359{
 360        return is_valid_rel(off - jit->prg);
 361}
 362
 363/*
 364 * Return whether given displacement can be encoded using
 365 * Long-Displacement Facility
 366 */
 367static bool is_valid_ldisp(int disp)
 368{
 369        return disp >= -524288 && disp <= 524287;
 370}
 371
 372/*
 373 * Return whether the next 32-bit literal pool entry can be referenced using
 374 * Long-Displacement Facility
 375 */
 376static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
 377{
 378        return is_valid_ldisp(jit->lit32 - jit->base_ip);
 379}
 380
 381/*
 382 * Return whether the next 64-bit literal pool entry can be referenced using
 383 * Long-Displacement Facility
 384 */
 385static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
 386{
 387        return is_valid_ldisp(jit->lit64 - jit->base_ip);
 388}
 389
 390/*
 391 * Fill whole space with illegal instructions
 392 */
 393static void jit_fill_hole(void *area, unsigned int size)
 394{
 395        memset(area, 0, size);
 396}
 397
 398/*
 399 * Save registers from "rs" (register start) to "re" (register end) on stack
 400 */
 401static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 402{
 403        u32 off = STK_OFF_R6 + (rs - 6) * 8;
 404
 405        if (rs == re)
 406                /* stg %rs,off(%r15) */
 407                _EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
 408        else
 409                /* stmg %rs,%re,off(%r15) */
 410                _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
 411}
 412
 413/*
  414 * Restore registers "rs" (register start) to "re" (register end) from stack
 415 */
 416static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
 417{
 418        u32 off = STK_OFF_R6 + (rs - 6) * 8;
 419
 420        if (jit->seen & SEEN_STACK)
 421                off += STK_OFF + stack_depth;
 422
 423        if (rs == re)
 424                /* lg %rs,off(%r15) */
 425                _EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
 426        else
 427                /* lmg %rs,%re,off(%r15) */
 428                _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
 429}
 430
 431/*
 432 * Return first seen register (from start)
 433 */
 434static int get_start(struct bpf_jit *jit, int start)
 435{
 436        int i;
 437
 438        for (i = start; i <= 15; i++) {
 439                if (jit->seen_reg[i])
 440                        return i;
 441        }
 442        return 0;
 443}
 444
 445/*
  446 * Return last seen register (from start) before the first gap of two or more unseen registers
 447 */
 448static int get_end(struct bpf_jit *jit, int start)
 449{
 450        int i;
 451
 452        for (i = start; i < 15; i++) {
 453                if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 454                        return i - 1;
 455        }
 456        return jit->seen_reg[15] ? 15 : 14;
 457}
 458
 459#define REGS_SAVE       1
 460#define REGS_RESTORE    0
 461/*
 462 * Save and restore clobbered registers (6-15) on stack.
 463 * We save/restore registers in chunks with gap >= 2 registers.
 464 */
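     /*
      * For example, if only %r6, %r7 and %r10 are seen, two chunks are
      * emitted: "stmg %r6,%r7,..." and "stg %r10,..." (lmg/lg on restore).
      * If only %r6 and %r8 are seen, a single "stmg %r6,%r8,..." covers them,
      * since a gap of just one register does not end a chunk.
      */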
 465static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
 466{
 467        const int last = 15, save_restore_size = 6;
 468        int re = 6, rs;
 469
 470        if (is_first_pass(jit)) {
 471                /*
 472                 * We don't know yet which registers are used. Reserve space
 473                 * conservatively.
 474                 */
 475                jit->prg += (last - re + 1) * save_restore_size;
 476                return;
 477        }
 478
 479        do {
 480                rs = get_start(jit, re);
 481                if (!rs)
 482                        break;
 483                re = get_end(jit, rs + 1);
 484                if (op == REGS_SAVE)
 485                        save_regs(jit, rs, re);
 486                else
 487                        restore_regs(jit, rs, re, stack_depth);
 488                re++;
 489        } while (re <= last);
 490}
 491
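     /*
      * Emit "size" bytes that do nothing when executed: a forward branch over
      * the area (brcl when the distance does not fit a short relative offset,
      * brc otherwise), padded with 2-byte "bcr 0,%r0" nops. Used, for
      * instance, by the prologue to keep tail_call_start at a fixed offset.
      */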
 492static void bpf_skip(struct bpf_jit *jit, int size)
 493{
 494        if (size >= 6 && !is_valid_rel(size)) {
 495                /* brcl 0xf,size */
 496                EMIT6_PCREL_RIL(0xc0f4000000, size);
 497                size -= 6;
 498        } else if (size >= 4 && is_valid_rel(size)) {
 499                /* brc 0xf,size */
 500                EMIT4_PCREL(0xa7f40000, size);
 501                size -= 4;
 502        }
 503        while (size >= 2) {
 504                /* bcr 0,%0 */
 505                _EMIT2(0x0700);
 506                size -= 2;
 507        }
 508}
 509
 510/*
 511 * Emit function prologue
 512 *
 513 * Save registers and create stack frame if necessary.
  514 * See stack frame layout description in "bpf_jit.h"!
 515 */
 516static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
 517{
 518        if (jit->seen & SEEN_TAIL_CALL) {
 519                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
 520                _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
 521        } else {
 522                /*
 523                 * There are no tail calls. Insert nops in order to have
 524                 * tail_call_start at a predictable offset.
 525                 */
 526                bpf_skip(jit, 6);
 527        }
 528        /* Tail calls have to skip above initialization */
 529        jit->tail_call_start = jit->prg;
 530        /* Save registers */
 531        save_restore_regs(jit, REGS_SAVE, stack_depth);
 532        /* Setup literal pool */
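             /*
              * If the final size is known and the end of the image (where the
              * literal pools live) is within long-displacement range of this
              * point, a 2-byte "basr %l,0" establishes the base; otherwise
              * (including the conservative first pass) a 6-byte "larl" loads
              * the address of the 32-bit literal pool and displacements are
              * taken relative to lit32_start.
              */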
 533        if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
 534                if (!is_first_pass(jit) &&
 535                    is_valid_ldisp(jit->size - (jit->prg + 2))) {
 536                        /* basr %l,0 */
 537                        EMIT2(0x0d00, REG_L, REG_0);
 538                        jit->base_ip = jit->prg;
 539                } else {
 540                        /* larl %l,lit32_start */
 541                        EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
 542                        jit->base_ip = jit->lit32_start;
 543                }
 544        }
 545        /* Setup stack and backchain */
 546        if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
 547                if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 548                        /* lgr %w1,%r15 (backchain) */
 549                        EMIT4(0xb9040000, REG_W1, REG_15);
 550                /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
 551                EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
 552                /* aghi %r15,-STK_OFF */
 553                EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
 554                if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 555                        /* stg %w1,152(%r15) (backchain) */
 556                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 557                                      REG_15, 152);
 558        }
 559}
 560
 561/*
 562 * Function epilogue
 563 */
 564static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 565{
 566        jit->exit_ip = jit->prg;
 567        /* Load exit code: lgr %r2,%b0 */
 568        EMIT4(0xb9040000, REG_2, BPF_REG_0);
 569        /* Restore registers */
 570        save_restore_regs(jit, REGS_RESTORE, stack_depth);
 571        if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
 572                jit->r14_thunk_ip = jit->prg;
 573                /* Generate __s390_indirect_jump_r14 thunk */
 574                if (test_facility(35)) {
 575                        /* exrl %r0,.+10 */
 576                        EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
 577                } else {
 578                        /* larl %r1,.+14 */
 579                        EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
 580                        /* ex 0,0(%r1) */
 581                        EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
 582                }
 583                /* j . */
 584                EMIT4_PCREL(0xa7f40000, 0);
 585        }
 586        /* br %r14 */
 587        _EMIT2(0x07fe);
 588
 589        if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
 590            (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
 591                jit->r1_thunk_ip = jit->prg;
 592                /* Generate __s390_indirect_jump_r1 thunk */
 593                if (test_facility(35)) {
 594                        /* exrl %r0,.+10 */
 595                        EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
 596                        /* j . */
 597                        EMIT4_PCREL(0xa7f40000, 0);
 598                        /* br %r1 */
 599                        _EMIT2(0x07f1);
 600                } else {
  601                        /* ex 0,S390_lowcore.br_r1_trampoline */
 602                        EMIT4_DISP(0x44000000, REG_0, REG_0,
 603                                   offsetof(struct lowcore, br_r1_trampoline));
 604                        /* j . */
 605                        EMIT4_PCREL(0xa7f40000, 0);
 606                }
 607        }
 608}
 609
 610static int get_probe_mem_regno(const u8 *insn)
 611{
 612        /*
 613         * insn must point to llgc, llgh, llgf or lg, which have destination
 614         * register at the same position.
 615         */
 616        if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
 617                return -1;
 618        if (insn[5] != 0x90 && /* llgc */
 619            insn[5] != 0x91 && /* llgh */
 620            insn[5] != 0x16 && /* llgf */
 621            insn[5] != 0x04) /* lg */
 622                return -1;
 623        return insn[1] >> 4;
 624}
 625
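     /*
      * Fixup handler for BPF_PROBE_MEM loads: when the llgc/llgh/llgf/lg
      * probe instruction faults, zero its destination register and resume at
      * the nop that follows it (see bpf_jit_probe_mem() below), so the
      * faulting load simply yields 0.
      */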
 626static bool ex_handler_bpf(const struct exception_table_entry *x,
 627                           struct pt_regs *regs)
 628{
 629        int regno;
 630        u8 *insn;
 631
 632        regs->psw.addr = extable_fixup(x);
 633        insn = (u8 *)__rewind_psw(regs->psw, regs->int_code >> 16);
 634        regno = get_probe_mem_regno(insn);
 635        if (WARN_ON_ONCE(regno < 0))
 636                /* JIT bug - unexpected instruction. */
 637                return false;
 638        regs->gprs[regno] = 0;
 639        return true;
 640}
 641
 642static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
 643                             int probe_prg, int nop_prg)
 644{
 645        struct exception_table_entry *ex;
 646        s64 delta;
 647        u8 *insn;
 648        int prg;
 649        int i;
 650
 651        if (!fp->aux->extable)
 652                /* Do nothing during early JIT passes. */
 653                return 0;
 654        insn = jit->prg_buf + probe_prg;
 655        if (WARN_ON_ONCE(get_probe_mem_regno(insn) < 0))
 656                /* JIT bug - unexpected probe instruction. */
 657                return -1;
 658        if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
 659                /* JIT bug - gap between probe and nop instructions. */
 660                return -1;
 661        for (i = 0; i < 2; i++) {
 662                if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
 663                        /* Verifier bug - not enough entries. */
 664                        return -1;
 665                ex = &fp->aux->extable[jit->excnt];
 666                /* Add extable entries for probe and nop instructions. */
 667                prg = i == 0 ? probe_prg : nop_prg;
 668                delta = jit->prg_buf + prg - (u8 *)&ex->insn;
 669                if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
 670                        /* JIT bug - code and extable must be close. */
 671                        return -1;
 672                ex->insn = delta;
 673                /*
 674                 * Always land on the nop. Note that extable infrastructure
 675                 * ignores fixup field, it is handled by ex_handler_bpf().
 676                 */
 677                delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
 678                if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
 679                        /* JIT bug - landing pad and extable must be close. */
 680                        return -1;
 681                ex->fixup = delta;
 682                ex->handler = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
 683                jit->excnt++;
 684        }
 685        return 0;
 686}
 687
 688/*
 689 * Compile one eBPF instruction into s390x code
 690 *
 691 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 692 * stack space for the large switch statement.
 693 */
 694static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 695                                 int i, bool extra_pass, u32 stack_depth)
 696{
 697        struct bpf_insn *insn = &fp->insnsi[i];
 698        u32 dst_reg = insn->dst_reg;
 699        u32 src_reg = insn->src_reg;
 700        int last, insn_count = 1;
 701        u32 *addrs = jit->addrs;
 702        s32 imm = insn->imm;
 703        s16 off = insn->off;
 704        int probe_prg = -1;
 705        unsigned int mask;
 706        int nop_prg;
 707        int err;
 708
 709        if (BPF_CLASS(insn->code) == BPF_LDX &&
 710            BPF_MODE(insn->code) == BPF_PROBE_MEM)
 711                probe_prg = jit->prg;
 712
 713        switch (insn->code) {
 714        /*
 715         * BPF_MOV
 716         */
 717        case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
 718                /* llgfr %dst,%src */
 719                EMIT4(0xb9160000, dst_reg, src_reg);
 720                if (insn_is_zext(&insn[1]))
 721                        insn_count = 2;
 722                break;
 723        case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 724                /* lgr %dst,%src */
 725                EMIT4(0xb9040000, dst_reg, src_reg);
 726                break;
 727        case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 728                /* llilf %dst,imm */
 729                EMIT6_IMM(0xc00f0000, dst_reg, imm);
 730                if (insn_is_zext(&insn[1]))
 731                        insn_count = 2;
 732                break;
 733        case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 734                /* lgfi %dst,imm */
 735                EMIT6_IMM(0xc0010000, dst_reg, imm);
 736                break;
 737        /*
 738         * BPF_LD 64
 739         */
 740        case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 741        {
 742                /* 16 byte instruction that uses two 'struct bpf_insn' */
 743                u64 imm64;
 744
 745                imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 746                /* lgrl %dst,imm */
 747                EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
 748                insn_count = 2;
 749                break;
 750        }
 751        /*
 752         * BPF_ADD
 753         */
 754        case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 755                /* ar %dst,%src */
 756                EMIT2(0x1a00, dst_reg, src_reg);
 757                EMIT_ZERO(dst_reg);
 758                break;
 759        case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 760                /* agr %dst,%src */
 761                EMIT4(0xb9080000, dst_reg, src_reg);
 762                break;
 763        case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 764                if (!imm)
 765                        break;
 766                /* alfi %dst,imm */
 767                EMIT6_IMM(0xc20b0000, dst_reg, imm);
 768                EMIT_ZERO(dst_reg);
 769                break;
 770        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 771                if (!imm)
 772                        break;
 773                /* agfi %dst,imm */
 774                EMIT6_IMM(0xc2080000, dst_reg, imm);
 775                break;
 776        /*
 777         * BPF_SUB
 778         */
 779        case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 780                /* sr %dst,%src */
 781                EMIT2(0x1b00, dst_reg, src_reg);
 782                EMIT_ZERO(dst_reg);
 783                break;
 784        case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 785                /* sgr %dst,%src */
 786                EMIT4(0xb9090000, dst_reg, src_reg);
 787                break;
 788        case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 789                if (!imm)
 790                        break;
 791                /* alfi %dst,-imm */
 792                EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 793                EMIT_ZERO(dst_reg);
 794                break;
 795        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 796                if (!imm)
 797                        break;
 798                /* agfi %dst,-imm */
 799                EMIT6_IMM(0xc2080000, dst_reg, -imm);
 800                break;
 801        /*
 802         * BPF_MUL
 803         */
 804        case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 805                /* msr %dst,%src */
 806                EMIT4(0xb2520000, dst_reg, src_reg);
 807                EMIT_ZERO(dst_reg);
 808                break;
 809        case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 810                /* msgr %dst,%src */
 811                EMIT4(0xb90c0000, dst_reg, src_reg);
 812                break;
 813        case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 814                if (imm == 1)
 815                        break;
  816                /* msfi %dst,imm */
 817                EMIT6_IMM(0xc2010000, dst_reg, imm);
 818                EMIT_ZERO(dst_reg);
 819                break;
 820        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 821                if (imm == 1)
 822                        break;
 823                /* msgfi %dst,imm */
 824                EMIT6_IMM(0xc2000000, dst_reg, imm);
 825                break;
 826        /*
 827         * BPF_DIV / BPF_MOD
 828         */
 829        case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
 830        case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
 831        {
 832                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 833
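             /*
              * dlr/dlgr divide the value held in the even/odd register pair
              * %w0:%w1; the quotient lands in the odd register (%w1) and the
              * remainder in the even one (%w0), hence the rc_reg choice above.
              */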
 834                /* lhi %w0,0 */
 835                EMIT4_IMM(0xa7080000, REG_W0, 0);
 836                /* lr %w1,%dst */
 837                EMIT2(0x1800, REG_W1, dst_reg);
 838                /* dlr %w0,%src */
 839                EMIT4(0xb9970000, REG_W0, src_reg);
 840                /* llgfr %dst,%rc */
 841                EMIT4(0xb9160000, dst_reg, rc_reg);
 842                if (insn_is_zext(&insn[1]))
 843                        insn_count = 2;
 844                break;
 845        }
 846        case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
 847        case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
 848        {
 849                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 850
 851                /* lghi %w0,0 */
 852                EMIT4_IMM(0xa7090000, REG_W0, 0);
 853                /* lgr %w1,%dst */
 854                EMIT4(0xb9040000, REG_W1, dst_reg);
  855                /* dlgr %w0,%src */
 856                EMIT4(0xb9870000, REG_W0, src_reg);
 857                /* lgr %dst,%rc */
 858                EMIT4(0xb9040000, dst_reg, rc_reg);
 859                break;
 860        }
 861        case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
 862        case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
 863        {
 864                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 865
 866                if (imm == 1) {
 867                        if (BPF_OP(insn->code) == BPF_MOD)
  868                                /* lghi %dst,0 */
 869                                EMIT4_IMM(0xa7090000, dst_reg, 0);
 870                        break;
 871                }
 872                /* lhi %w0,0 */
 873                EMIT4_IMM(0xa7080000, REG_W0, 0);
 874                /* lr %w1,%dst */
 875                EMIT2(0x1800, REG_W1, dst_reg);
 876                if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
 877                        /* dl %w0,<d(imm)>(%l) */
 878                        EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
 879                                      EMIT_CONST_U32(imm));
 880                } else {
 881                        /* lgfrl %dst,imm */
 882                        EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
 883                                         _EMIT_CONST_U32(imm));
 884                        jit->seen |= SEEN_LITERAL;
 885                        /* dlr %w0,%dst */
 886                        EMIT4(0xb9970000, REG_W0, dst_reg);
 887                }
 888                /* llgfr %dst,%rc */
 889                EMIT4(0xb9160000, dst_reg, rc_reg);
 890                if (insn_is_zext(&insn[1]))
 891                        insn_count = 2;
 892                break;
 893        }
 894        case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
 895        case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
 896        {
 897                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 898
 899                if (imm == 1) {
 900                        if (BPF_OP(insn->code) == BPF_MOD)
  901                                /* lghi %dst,0 */
 902                                EMIT4_IMM(0xa7090000, dst_reg, 0);
 903                        break;
 904                }
 905                /* lghi %w0,0 */
 906                EMIT4_IMM(0xa7090000, REG_W0, 0);
 907                /* lgr %w1,%dst */
 908                EMIT4(0xb9040000, REG_W1, dst_reg);
 909                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 910                        /* dlg %w0,<d(imm)>(%l) */
 911                        EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
 912                                      EMIT_CONST_U64(imm));
 913                } else {
 914                        /* lgrl %dst,imm */
 915                        EMIT6_PCREL_RILB(0xc4080000, dst_reg,
 916                                         _EMIT_CONST_U64(imm));
 917                        jit->seen |= SEEN_LITERAL;
 918                        /* dlgr %w0,%dst */
 919                        EMIT4(0xb9870000, REG_W0, dst_reg);
 920                }
 921                /* lgr %dst,%rc */
 922                EMIT4(0xb9040000, dst_reg, rc_reg);
 923                break;
 924        }
 925        /*
 926         * BPF_AND
 927         */
 928        case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
 929                /* nr %dst,%src */
 930                EMIT2(0x1400, dst_reg, src_reg);
 931                EMIT_ZERO(dst_reg);
 932                break;
 933        case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 934                /* ngr %dst,%src */
 935                EMIT4(0xb9800000, dst_reg, src_reg);
 936                break;
 937        case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
 938                /* nilf %dst,imm */
 939                EMIT6_IMM(0xc00b0000, dst_reg, imm);
 940                EMIT_ZERO(dst_reg);
 941                break;
 942        case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 943                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 944                        /* ng %dst,<d(imm)>(%l) */
 945                        EMIT6_DISP_LH(0xe3000000, 0x0080,
 946                                      dst_reg, REG_0, REG_L,
 947                                      EMIT_CONST_U64(imm));
 948                } else {
 949                        /* lgrl %w0,imm */
 950                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
 951                                         _EMIT_CONST_U64(imm));
 952                        jit->seen |= SEEN_LITERAL;
 953                        /* ngr %dst,%w0 */
 954                        EMIT4(0xb9800000, dst_reg, REG_W0);
 955                }
 956                break;
 957        /*
 958         * BPF_OR
 959         */
 960        case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 961                /* or %dst,%src */
 962                EMIT2(0x1600, dst_reg, src_reg);
 963                EMIT_ZERO(dst_reg);
 964                break;
 965        case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 966                /* ogr %dst,%src */
 967                EMIT4(0xb9810000, dst_reg, src_reg);
 968                break;
 969        case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
 970                /* oilf %dst,imm */
 971                EMIT6_IMM(0xc00d0000, dst_reg, imm);
 972                EMIT_ZERO(dst_reg);
 973                break;
 974        case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
 975                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 976                        /* og %dst,<d(imm)>(%l) */
 977                        EMIT6_DISP_LH(0xe3000000, 0x0081,
 978                                      dst_reg, REG_0, REG_L,
 979                                      EMIT_CONST_U64(imm));
 980                } else {
 981                        /* lgrl %w0,imm */
 982                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
 983                                         _EMIT_CONST_U64(imm));
 984                        jit->seen |= SEEN_LITERAL;
 985                        /* ogr %dst,%w0 */
 986                        EMIT4(0xb9810000, dst_reg, REG_W0);
 987                }
 988                break;
 989        /*
 990         * BPF_XOR
 991         */
 992        case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
 993                /* xr %dst,%src */
 994                EMIT2(0x1700, dst_reg, src_reg);
 995                EMIT_ZERO(dst_reg);
 996                break;
 997        case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
 998                /* xgr %dst,%src */
 999                EMIT4(0xb9820000, dst_reg, src_reg);
1000                break;
1001        case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
1002                if (!imm)
1003                        break;
1004                /* xilf %dst,imm */
1005                EMIT6_IMM(0xc0070000, dst_reg, imm);
1006                EMIT_ZERO(dst_reg);
1007                break;
1008        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
1009                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1010                        /* xg %dst,<d(imm)>(%l) */
1011                        EMIT6_DISP_LH(0xe3000000, 0x0082,
1012                                      dst_reg, REG_0, REG_L,
1013                                      EMIT_CONST_U64(imm));
1014                } else {
1015                        /* lgrl %w0,imm */
1016                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1017                                         _EMIT_CONST_U64(imm));
1018                        jit->seen |= SEEN_LITERAL;
1019                        /* xgr %dst,%w0 */
1020                        EMIT4(0xb9820000, dst_reg, REG_W0);
1021                }
1022                break;
1023        /*
1024         * BPF_LSH
1025         */
1026        case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1027                /* sll %dst,0(%src) */
1028                EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1029                EMIT_ZERO(dst_reg);
1030                break;
1031        case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1032                /* sllg %dst,%dst,0(%src) */
1033                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1034                break;
1035        case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1036                if (imm == 0)
1037                        break;
1038                /* sll %dst,imm(%r0) */
1039                EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1040                EMIT_ZERO(dst_reg);
1041                break;
1042        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1043                if (imm == 0)
1044                        break;
1045                /* sllg %dst,%dst,imm(%r0) */
1046                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1047                break;
1048        /*
1049         * BPF_RSH
1050         */
1051        case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1052                /* srl %dst,0(%src) */
1053                EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1054                EMIT_ZERO(dst_reg);
1055                break;
1056        case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1057                /* srlg %dst,%dst,0(%src) */
1058                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1059                break;
1060        case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1061                if (imm == 0)
1062                        break;
1063                /* srl %dst,imm(%r0) */
1064                EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1065                EMIT_ZERO(dst_reg);
1066                break;
1067        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1068                if (imm == 0)
1069                        break;
1070                /* srlg %dst,%dst,imm(%r0) */
1071                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1072                break;
1073        /*
1074         * BPF_ARSH
1075         */
1076        case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
 1077                /* sra %dst,0(%src) */
1078                EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1079                EMIT_ZERO(dst_reg);
1080                break;
1081        case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1082                /* srag %dst,%dst,0(%src) */
1083                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1084                break;
 1085        case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
1086                if (imm == 0)
1087                        break;
1088                /* sra %dst,imm(%r0) */
1089                EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1090                EMIT_ZERO(dst_reg);
1091                break;
1092        case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1093                if (imm == 0)
1094                        break;
1095                /* srag %dst,%dst,imm(%r0) */
1096                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1097                break;
1098        /*
1099         * BPF_NEG
1100         */
1101        case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1102                /* lcr %dst,%dst */
1103                EMIT2(0x1300, dst_reg, dst_reg);
1104                EMIT_ZERO(dst_reg);
1105                break;
1106        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1107                /* lcgr %dst,%dst */
1108                EMIT4(0xb9030000, dst_reg, dst_reg);
1109                break;
1110        /*
1111         * BPF_FROM_BE/LE
1112         */
1113        case BPF_ALU | BPF_END | BPF_FROM_BE:
1114                /* s390 is big endian, therefore only clear high order bytes */
1115                switch (imm) {
1116                case 16: /* dst = (u16) cpu_to_be16(dst) */
1117                        /* llghr %dst,%dst */
1118                        EMIT4(0xb9850000, dst_reg, dst_reg);
1119                        if (insn_is_zext(&insn[1]))
1120                                insn_count = 2;
1121                        break;
1122                case 32: /* dst = (u32) cpu_to_be32(dst) */
1123                        if (!fp->aux->verifier_zext)
1124                                /* llgfr %dst,%dst */
1125                                EMIT4(0xb9160000, dst_reg, dst_reg);
1126                        break;
1127                case 64: /* dst = (u64) cpu_to_be64(dst) */
1128                        break;
1129                }
1130                break;
1131        case BPF_ALU | BPF_END | BPF_FROM_LE:
1132                switch (imm) {
1133                case 16: /* dst = (u16) cpu_to_le16(dst) */
1134                        /* lrvr %dst,%dst */
1135                        EMIT4(0xb91f0000, dst_reg, dst_reg);
1136                        /* srl %dst,16(%r0) */
1137                        EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1138                        /* llghr %dst,%dst */
1139                        EMIT4(0xb9850000, dst_reg, dst_reg);
1140                        if (insn_is_zext(&insn[1]))
1141                                insn_count = 2;
1142                        break;
1143                case 32: /* dst = (u32) cpu_to_le32(dst) */
1144                        /* lrvr %dst,%dst */
1145                        EMIT4(0xb91f0000, dst_reg, dst_reg);
1146                        if (!fp->aux->verifier_zext)
1147                                /* llgfr %dst,%dst */
1148                                EMIT4(0xb9160000, dst_reg, dst_reg);
1149                        break;
1150                case 64: /* dst = (u64) cpu_to_le64(dst) */
1151                        /* lrvgr %dst,%dst */
1152                        EMIT4(0xb90f0000, dst_reg, dst_reg);
1153                        break;
1154                }
1155                break;
1156        /*
1157         * BPF_ST(X)
1158         */
1159        case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1160                /* stcy %src,off(%dst) */
1161                EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
1162                jit->seen |= SEEN_MEM;
1163                break;
 1164        case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1165                /* sthy %src,off(%dst) */
1166                EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
1167                jit->seen |= SEEN_MEM;
1168                break;
1169        case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1170                /* sty %src,off(%dst) */
1171                EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
1172                jit->seen |= SEEN_MEM;
1173                break;
 1174        case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1175                /* stg %src,off(%dst) */
1176                EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
1177                jit->seen |= SEEN_MEM;
1178                break;
1179        case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1180                /* lhi %w0,imm */
1181                EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
1182                /* stcy %w0,off(dst) */
1183                EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
1184                jit->seen |= SEEN_MEM;
1185                break;
 1186        case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1187                /* lhi %w0,imm */
1188                EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
1189                /* sthy %w0,off(dst) */
1190                EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
1191                jit->seen |= SEEN_MEM;
1192                break;
1193        case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1194                /* llilf %w0,imm  */
1195                EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1196                /* sty %w0,off(%dst) */
1197                EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
1198                jit->seen |= SEEN_MEM;
1199                break;
1200        case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1201                /* lgfi %w0,imm */
1202                EMIT6_IMM(0xc0010000, REG_W0, imm);
1203                /* stg %w0,off(%dst) */
1204                EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
1205                jit->seen |= SEEN_MEM;
1206                break;
1207        /*
1208         * BPF_ATOMIC
1209         */
1210        case BPF_STX | BPF_ATOMIC | BPF_DW:
1211        case BPF_STX | BPF_ATOMIC | BPF_W:
1212        {
1213                bool is32 = BPF_SIZE(insn->code) == BPF_W;
1214
1215                switch (insn->imm) {
1216/* {op32|op64} {%w0|%src},%src,off(%dst) */
1217#define EMIT_ATOMIC(op32, op64) do {                                    \
1218        EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64),               \
1219                      (insn->imm & BPF_FETCH) ? src_reg : REG_W0,       \
1220                      src_reg, dst_reg, off);                           \
1221        if (is32 && (insn->imm & BPF_FETCH))                            \
1222                EMIT_ZERO(src_reg);                                     \
1223} while (0)
1224                case BPF_ADD:
1225                case BPF_ADD | BPF_FETCH:
1226                        /* {laal|laalg} */
1227                        EMIT_ATOMIC(0x00fa, 0x00ea);
1228                        break;
1229                case BPF_AND:
1230                case BPF_AND | BPF_FETCH:
1231                        /* {lan|lang} */
1232                        EMIT_ATOMIC(0x00f4, 0x00e4);
1233                        break;
1234                case BPF_OR:
1235                case BPF_OR | BPF_FETCH:
1236                        /* {lao|laog} */
1237                        EMIT_ATOMIC(0x00f6, 0x00e6);
1238                        break;
1239                case BPF_XOR:
1240                case BPF_XOR | BPF_FETCH:
1241                        /* {lax|laxg} */
1242                        EMIT_ATOMIC(0x00f7, 0x00e7);
1243                        break;
1244#undef EMIT_ATOMIC
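                     /*
                      * BPF_XCHG is a compare-and-swap loop: the old value is
                      * loaded into %w0, csy/csg tries to replace it with %src
                      * and, if memory changed in the meantime (condition
                      * code 1), reloads %w0 and retries. The old value is
                      * then returned in %src.
                      */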
1245                case BPF_XCHG:
1246                        /* {ly|lg} %w0,off(%dst) */
1247                        EMIT6_DISP_LH(0xe3000000,
1248                                      is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
1249                                      dst_reg, off);
1250                        /* 0: {csy|csg} %w0,%src,off(%dst) */
1251                        EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1252                                      REG_W0, src_reg, dst_reg, off);
1253                        /* brc 4,0b */
1254                        EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
1255                        /* {llgfr|lgr} %src,%w0 */
1256                        EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
1257                        if (is32 && insn_is_zext(&insn[1]))
1258                                insn_count = 2;
1259                        break;
1260                case BPF_CMPXCHG:
 1261                        /* {csy|csg} %b0,%src,off(%dst) */
1262                        EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
1263                                      BPF_REG_0, src_reg, dst_reg, off);
1264                        break;
1265                default:
1266                        pr_err("Unknown atomic operation %02x\n", insn->imm);
1267                        return -1;
1268                }
1269
1270                jit->seen |= SEEN_MEM;
1271                break;
1272        }
1273        /*
1274         * BPF_LDX
1275         */
1276        case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1277        case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1278                /* llgc %dst,0(off,%src) */
1279                EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
1280                jit->seen |= SEEN_MEM;
1281                if (insn_is_zext(&insn[1]))
1282                        insn_count = 2;
1283                break;
1284        case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1285        case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1286                /* llgh %dst,0(off,%src) */
1287                EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
1288                jit->seen |= SEEN_MEM;
1289                if (insn_is_zext(&insn[1]))
1290                        insn_count = 2;
1291                break;
1292        case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1293        case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1294                /* llgf %dst,off(%src) */
1295                jit->seen |= SEEN_MEM;
1296                EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1297                if (insn_is_zext(&insn[1]))
1298                        insn_count = 2;
1299                break;
1300        case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1301        case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1302                /* lg %dst,0(off,%src) */
1303                jit->seen |= SEEN_MEM;
1304                EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1305                break;
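            /*
             * For the BPF_PROBE_MEM variants above the load itself is the
             * same; the exception table entries that make a faulting access
             * non-fatal are added generically after the main switch (see the
             * probe_prg handling at the end of this function).
             */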
1306        /*
1307         * BPF_JMP / CALL
1308         */
1309        case BPF_JMP | BPF_CALL:
1310        {
1311                u64 func;
1312                bool func_addr_fixed;
1313                int ret;
1314
1315                ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1316                                            &func, &func_addr_fixed);
1317                if (ret < 0)
1318                        return -1;
1319
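                    /*
                     * The target address is loaded via lgrl from the 64-bit
                     * literal pool, so the call sequence does not depend on
                     * the distance between the JITed code and the helper.
                     */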
1320                REG_SET_SEEN(BPF_REG_5);
1321                jit->seen |= SEEN_FUNC;
1322                /* lgrl %w1,func */
1323                EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
1324                if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
1325                        /* brasl %r14,__s390_indirect_jump_r1 */
1326                        EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
1327                } else {
1328                        /* basr %r14,%w1 */
1329                        EMIT2(0x0d00, REG_14, REG_W1);
1330                }
1331                /* lgr %b0,%r2: load return value into %b0 */
1332                EMIT4(0xb9040000, BPF_REG_0, REG_2);
1333                break;
1334        }
1335        case BPF_JMP | BPF_TAIL_CALL: {
1336                int patch_1_clrj, patch_2_clij, patch_3_brc;
1337
1338                /*
1339                 * Implicit input:
1340                 *  B1: pointer to ctx
1341                 *  B2: pointer to bpf_array
1342                 *  B3: index in bpf_array
1343                 */
1344                jit->seen |= SEEN_TAIL_CALL;
1345
1346                /*
1347                 * if (index >= array->map.max_entries)
1348                 *         goto out;
1349                 */
1350
1351                /* llgf %w1,map.max_entries(%b2) */
1352                EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1353                              offsetof(struct bpf_array, map.max_entries));
1354                /* if ((u32)%b3 >= (u32)%w1) goto out; */
1355                /* clrj %b3,%w1,0xa,out */
1356                patch_1_clrj = jit->prg;
1357                EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
1358                                 jit->prg);
1359
1360                /*
1361                 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
1362                 *         goto out;
1363                 */
1364
1365                if (jit->seen & SEEN_STACK)
1366                        off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1367                else
1368                        off = STK_OFF_TCCNT;
1369                /* lhi %w0,1 */
1370                EMIT4_IMM(0xa7080000, REG_W0, 1);
1371                /* laal %w1,%w0,off(%r15) */
1372                EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1373                /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
1374                patch_2_clij = jit->prg;
1375                EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
1376                                 2, jit->prg);
1377
1378                /*
1379                 * prog = array->ptrs[index];
1380                 * if (prog == NULL)
1381                 *         goto out;
1382                 */
1383
1384                /* llgfr %r1,%b3: %r1 = (u32) index */
1385                EMIT4(0xb9160000, REG_1, BPF_REG_3);
1386                /* sllg %r1,%r1,3: %r1 *= 8 */
1387                EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1388                /* ltg %r1,prog(%b2,%r1) */
1389                EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1390                              REG_1, offsetof(struct bpf_array, ptrs));
1391                /* brc 0x8,out */
1392                patch_3_brc = jit->prg;
1393                EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
1394
1395                /*
1396                 * Restore registers before calling function
1397                 */
1398                save_restore_regs(jit, REGS_RESTORE, stack_depth);
1399
1400                /*
1401                 * goto *(prog->bpf_func + tail_call_start);
1402                 */
1403
1404                /* lg %r1,bpf_func(%r1) */
1405                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1406                              offsetof(struct bpf_prog, bpf_func));
1407                /* bc 0xf,tail_call_start(%r1) */
1408                _EMIT4(0x47f01000 + jit->tail_call_start);
1409                /* out: */
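                    /*
                     * Now that the address of "out" is known, patch the
                     * relative offsets of the three forward branches emitted
                     * above.  The 16-bit offset field starts at byte 2 of both
                     * the 6-byte RIE (clrj/clij) and the 4-byte RI (brc)
                     * formats and is counted in halfwords, hence the shift.
                     */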
1410                if (jit->prg_buf) {
1411                        *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
1412                                (jit->prg - patch_1_clrj) >> 1;
1413                        *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
1414                                (jit->prg - patch_2_clij) >> 1;
1415                        *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
1416                                (jit->prg - patch_3_brc) >> 1;
1417                }
1418                break;
1419        }
1420        case BPF_JMP | BPF_EXIT: /* return b0 */
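                    /*
                     * If this is the last instruction, fall through into the
                     * epilogue; otherwise branch to the shared exit code,
                     * using the short brc form whenever it is in range.
                     */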
1421                last = (i == fp->len - 1) ? 1 : 0;
1422                if (last)
1423                        break;
1424                if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1425                        /* brc 0xf, <exit> */
1426                        EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1427                else
1428                        /* brcl 0xf, <exit> */
1429                        EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1430                break;
1431        /*
1432         * Branch relative (number of skipped instructions) to offset on
1433         * condition.
1434         *
1435         * Condition code to mask mapping:
1436         *
1437         * CC | Description        | Mask
1438         * ------------------------------
1439         * 0  | Operands equal     |    8
1440         * 1  | First operand low  |    4
1441         * 2  | First operand high |    2
1442         * 3  | Unused             |    1
1443         *
1444         * For s390x relative branches: ip = ip + off_bytes
1445         * For BPF relative branches:   insn = insn + off_insns + 1
1446         *
1447         * For example, with offset 0 an s390x branch jumps to the branch
1448         * instruction itself (a loop), whereas a BPF branch with offset 0
1449         * branches to the instruction following the branch.
1450         */
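            /*
             * The jump cases below share five emit paths:
             *   branch_ks: signed compare of dst with the immediate
             *   branch_ku: unsigned compare of dst with the immediate
             *              (loads imm into a register, then reuses branch_xu)
             *   branch_xs: signed register-register compare
             *   branch_xu: unsigned register-register compare
             *   branch_oc: branch on condition only; the compare, if any,
             *              has already been emitted
             */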
1451        case BPF_JMP | BPF_JA: /* if (true) */
1452                mask = 0xf000; /* j */
1453                goto branch_oc;
1454        case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1455        case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1456                mask = 0x2000; /* jh */
1457                goto branch_ks;
1458        case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1459        case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1460                mask = 0x4000; /* jl */
1461                goto branch_ks;
1462        case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1463        case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1464                mask = 0xa000; /* jhe */
1465                goto branch_ks;
1466        case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1467        case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1468                mask = 0xc000; /* jle */
1469                goto branch_ks;
1470        case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1471        case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1472                mask = 0x2000; /* jh */
1473                goto branch_ku;
1474        case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1475        case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1476                mask = 0x4000; /* jl */
1477                goto branch_ku;
1478        case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1479        case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1480                mask = 0xa000; /* jhe */
1481                goto branch_ku;
1482        case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1483        case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1484                mask = 0xc000; /* jle */
1485                goto branch_ku;
1486        case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1487        case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1488                mask = 0x7000; /* jne */
1489                goto branch_ku;
1490        case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1491        case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1492                mask = 0x8000; /* je */
1493                goto branch_ku;
1494        case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1495        case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1496                mask = 0x7000; /* jnz */
1497                if (BPF_CLASS(insn->code) == BPF_JMP32) {
1498                        /* llilf %w1,imm (load zero extend imm) */
1499                        EMIT6_IMM(0xc00f0000, REG_W1, imm);
1500                        /* nr %w1,%dst */
1501                        EMIT2(0x1400, REG_W1, dst_reg);
1502                } else {
1503                        /* lgfi %w1,imm (load sign extend imm) */
1504                        EMIT6_IMM(0xc0010000, REG_W1, imm);
1505                        /* ngr %w1,%dst */
1506                        EMIT4(0xb9800000, REG_W1, dst_reg);
1507                }
1508                goto branch_oc;
1509
1510        case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1511        case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1512                mask = 0x2000; /* jh */
1513                goto branch_xs;
1514        case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1515        case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1516                mask = 0x4000; /* jl */
1517                goto branch_xs;
1518        case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1519        case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1520                mask = 0xa000; /* jhe */
1521                goto branch_xs;
1522        case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1523        case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1524                mask = 0xc000; /* jle */
1525                goto branch_xs;
1526        case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1527        case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1528                mask = 0x2000; /* jh */
1529                goto branch_xu;
1530        case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1531        case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1532                mask = 0x4000; /* jl */
1533                goto branch_xu;
1534        case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1535        case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1536                mask = 0xa000; /* jhe */
1537                goto branch_xu;
1538        case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1539        case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1540                mask = 0xc000; /* jle */
1541                goto branch_xu;
1542        case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1543        case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1544                mask = 0x7000; /* jne */
1545                goto branch_xu;
1546        case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1547        case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1548                mask = 0x8000; /* je */
1549                goto branch_xu;
1550        case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1551        case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1552        {
1553                bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1554
1555                mask = 0x7000; /* jnz */
1556                /* nrk or ngrk %w1,%dst,%src */
1557                EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1558                          REG_W1, dst_reg, src_reg);
1559                goto branch_oc;
1560branch_ks:
1561                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1562                /* cfi or cgfi %dst,imm */
1563                EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
1564                          dst_reg, imm);
1565                if (!is_first_pass(jit) &&
1566                    can_use_rel(jit, addrs[i + off + 1])) {
1567                        /* brc mask,off */
1568                        EMIT4_PCREL_RIC(0xa7040000,
1569                                        mask >> 12, addrs[i + off + 1]);
1570                } else {
1571                        /* brcl mask,off */
1572                        EMIT6_PCREL_RILC(0xc0040000,
1573                                         mask >> 12, addrs[i + off + 1]);
1574                }
1575                break;
1576branch_ku:
1577                /* lgfi %w1,imm (load sign extend imm) */
1578                src_reg = REG_1;
1579                EMIT6_IMM(0xc0010000, src_reg, imm);
1580                goto branch_xu;
1581branch_xs:
1582                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1583                if (!is_first_pass(jit) &&
1584                    can_use_rel(jit, addrs[i + off + 1])) {
1585                        /* crj or cgrj %dst,%src,mask,off */
1586                        EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1587                                    dst_reg, src_reg, i, off, mask);
1588                } else {
1589                        /* cr or cgr %dst,%src */
1590                        if (is_jmp32)
1591                                EMIT2(0x1900, dst_reg, src_reg);
1592                        else
1593                                EMIT4(0xb9200000, dst_reg, src_reg);
1594                        /* brcl mask,off */
1595                        EMIT6_PCREL_RILC(0xc0040000,
1596                                         mask >> 12, addrs[i + off + 1]);
1597                }
1598                break;
1599branch_xu:
1600                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1601                if (!is_first_pass(jit) &&
1602                    can_use_rel(jit, addrs[i + off + 1])) {
1603                        /* clrj or clgrj %dst,%src,mask,off */
1604                        EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1605                                    dst_reg, src_reg, i, off, mask);
1606                } else {
1607                        /* clr or clgr %dst,%src */
1608                        if (is_jmp32)
1609                                EMIT2(0x1500, dst_reg, src_reg);
1610                        else
1611                                EMIT4(0xb9210000, dst_reg, src_reg);
1612                        /* brcl mask,off */
1613                        EMIT6_PCREL_RILC(0xc0040000,
1614                                         mask >> 12, addrs[i + off + 1]);
1615                }
1616                break;
1617branch_oc:
1618                if (!is_first_pass(jit) &&
1619                    can_use_rel(jit, addrs[i + off + 1])) {
1620                        /* brc mask,off */
1621                        EMIT4_PCREL_RIC(0xa7040000,
1622                                        mask >> 12, addrs[i + off + 1]);
1623                } else {
1624                        /* brcl mask,off */
1625                        EMIT6_PCREL_RILC(0xc0040000,
1626                                         mask >> 12, addrs[i + off + 1]);
1627                }
1628                break;
1629        }
1630        default: /* too complex, give up */
1631                pr_err("Unknown opcode %02x\n", insn->code);
1632                return -1;
1633        }
1634
1635        if (probe_prg != -1) {
1636                /*
1637                 * Handlers of certain exceptions leave psw.addr pointing to
1638                 * the instruction directly after the failing one. Therefore,
1639                 * create two exception table entries and also add a nop in
1640                 * case two probing instructions come directly after each
1641                 * other.
1642                 */
1643                nop_prg = jit->prg;
1644                /* bcr 0,%0 */
1645                _EMIT2(0x0700);
1646                err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1647                if (err < 0)
1648                        return err;
1649        }
1650
1651        return insn_count;
1652}
1653
1654/*
1655 * Return whether the new address of the i-th instruction satisfies all invariants
1656 */
1657static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
1658{
1659        /* On the first pass anything goes */
1660        if (is_first_pass(jit))
1661                return true;
1662
1663        /* The codegen pass must not change anything */
1664        if (is_codegen_pass(jit))
1665                return jit->addrs[i] == jit->prg;
1666
1667        /* Passes in between must not increase code size */
1668        return jit->addrs[i] >= jit->prg;
1669}
1670
1671/*
1672 * Update the address of the i-th instruction
1673 */
1674static int bpf_set_addr(struct bpf_jit *jit, int i)
1675{
1676        int delta;
1677
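            /*
             * In the code generation pass the code may only have become
             * shorter (e.g. a brcl that can now be emitted as brc); pad the
             * difference with no-ops via bpf_skip() so that the addresses
             * recorded in the previous pass, and with them all already
             * emitted relative branches, stay valid.
             */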
1678        if (is_codegen_pass(jit)) {
1679                delta = jit->prg - jit->addrs[i];
1680                if (delta < 0)
1681                        bpf_skip(jit, -delta);
1682        }
1683        if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
1684                return -1;
1685        jit->addrs[i] = jit->prg;
1686        return 0;
1687}
1688
1689/*
1690 * Compile eBPF program into s390x code
1691 */
1692static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
1693                        bool extra_pass, u32 stack_depth)
1694{
1695        int i, insn_count, lit32_size, lit64_size;
1696
1697        jit->lit32 = jit->lit32_start;
1698        jit->lit64 = jit->lit64_start;
1699        jit->prg = 0;
1700        jit->excnt = 0;
1701
1702        bpf_jit_prologue(jit, stack_depth);
1703        if (bpf_set_addr(jit, 0) < 0)
1704                return -1;
1705        for (i = 0; i < fp->len; i += insn_count) {
1706                insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
1707                if (insn_count < 0)
1708                        return -1;
1709                /* Next instruction address */
1710                if (bpf_set_addr(jit, i + insn_count) < 0)
1711                        return -1;
1712        }
1713        bpf_jit_epilogue(jit, stack_depth);
1714
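            /*
             * Lay out the final image: program code, then the 32-bit literal
             * pool (4-byte aligned), then the 64-bit literal pool (8-byte
             * aligned).  jit->size covers the whole image, jit->size_prg only
             * the code.
             */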
1715        lit32_size = jit->lit32 - jit->lit32_start;
1716        lit64_size = jit->lit64 - jit->lit64_start;
1717        jit->lit32_start = jit->prg;
1718        if (lit32_size)
1719                jit->lit32_start = ALIGN(jit->lit32_start, 4);
1720        jit->lit64_start = jit->lit32_start + lit32_size;
1721        if (lit64_size)
1722                jit->lit64_start = ALIGN(jit->lit64_start, 8);
1723        jit->size = jit->lit64_start + lit64_size;
1724        jit->size_prg = jit->prg;
1725
1726        if (WARN_ON_ONCE(fp->aux->extable &&
1727                         jit->excnt != fp->aux->num_exentries))
1728                /* Verifier bug - wrong number of exception table entries. */
1729                return -1;
1730
1731        return 0;
1732}
1733
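/*
 * Ask the verifier to insert explicit zero-extension instructions after
 * 32-bit subregister writes.  The JIT can then skip them wherever the
 * instruction it emits (llgc, llgh, llgf, llgfr, ...) already zero-extends
 * the destination - see the insn_is_zext() checks above.
 */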
1734bool bpf_jit_needs_zext(void)
1735{
1736        return true;
1737}
1738
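/*
 * JIT state kept in fp->aux->jit_data across bpf_int_jit_compile() calls for
 * programs with subprograms: the extra pass reuses the saved context and
 * image to patch in the finally resolved call addresses.
 */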
1739struct s390_jit_data {
1740        struct bpf_binary_header *header;
1741        struct bpf_jit ctx;
1742        int pass;
1743};
1744
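/*
 * Allocate the binary image.  Layout: the JITed code and literal pools,
 * rounded up to the alignment of struct exception_table_entry, followed by
 * the exception table itself.
 */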
1745static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
1746                                               struct bpf_prog *fp)
1747{
1748        struct bpf_binary_header *header;
1749        u32 extable_size;
1750        u32 code_size;
1751
1752        /* We need two exception table entries per probing insn. */
1753        fp->aux->num_exentries *= 2;
1754
1755        code_size = roundup(jit->size,
1756                            __alignof__(struct exception_table_entry));
1757        extable_size = fp->aux->num_exentries *
1758                sizeof(struct exception_table_entry);
1759        header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
1760                                      8, jit_fill_hole);
1761        if (!header)
1762                return NULL;
1763        fp->aux->extable = (struct exception_table_entry *)
1764                (jit->prg_buf + code_size);
1765        return header;
1766}
1767
1768/*
1769 * Compile eBPF program "fp"
1770 */
1771struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1772{
1773        u32 stack_depth = round_up(fp->aux->stack_depth, 8);
1774        struct bpf_prog *tmp, *orig_fp = fp;
1775        struct bpf_binary_header *header;
1776        struct s390_jit_data *jit_data;
1777        bool tmp_blinded = false;
1778        bool extra_pass = false;
1779        struct bpf_jit jit;
1780        int pass;
1781
1782        if (!fp->jit_requested)
1783                return orig_fp;
1784
1785        tmp = bpf_jit_blind_constants(fp);
1786        /*
1787         * If blinding was requested and we failed during blinding,
1788         * we must fall back to the interpreter.
1789         */
1790        if (IS_ERR(tmp))
1791                return orig_fp;
1792        if (tmp != fp) {
1793                tmp_blinded = true;
1794                fp = tmp;
1795        }
1796
1797        jit_data = fp->aux->jit_data;
1798        if (!jit_data) {
1799                jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1800                if (!jit_data) {
1801                        fp = orig_fp;
1802                        goto out;
1803                }
1804                fp->aux->jit_data = jit_data;
1805        }
1806        if (jit_data->ctx.addrs) {
1807                jit = jit_data->ctx;
1808                header = jit_data->header;
1809                extra_pass = true;
1810                pass = jit_data->pass + 1;
1811                goto skip_init_ctx;
1812        }
1813
1814        memset(&jit, 0, sizeof(jit));
1815        jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1816        if (jit.addrs == NULL) {
1817                fp = orig_fp;
1818                goto out;
1819        }
1820        /*
1821         * Three initial passes:
1822         *   - 1/2: Determine clobbered registers
1823         *   - 3:   Calculate program size and addrs array
1824         */
1825        for (pass = 1; pass <= 3; pass++) {
1826                if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
1827                        fp = orig_fp;
1828                        goto free_addrs;
1829                }
1830        }
1831        /*
1832         * Final pass: Allocate and generate program
1833         */
1834        header = bpf_jit_alloc(&jit, fp);
1835        if (!header) {
1836                fp = orig_fp;
1837                goto free_addrs;
1838        }
1839skip_init_ctx:
1840        if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
1841                bpf_jit_binary_free(header);
1842                fp = orig_fp;
1843                goto free_addrs;
1844        }
1845        if (bpf_jit_enable > 1) {
1846                bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1847                print_fn_code(jit.prg_buf, jit.size_prg);
1848        }
1849        if (!fp->is_func || extra_pass) {
1850                bpf_jit_binary_lock_ro(header);
1851        } else {
1852                jit_data->header = header;
1853                jit_data->ctx = jit;
1854                jit_data->pass = pass;
1855        }
1856        fp->bpf_func = (void *) jit.prg_buf;
1857        fp->jited = 1;
1858        fp->jited_len = jit.size;
1859
1860        if (!fp->is_func || extra_pass) {
1861                bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
1862free_addrs:
1863                kvfree(jit.addrs);
1864                kfree(jit_data);
1865                fp->aux->jit_data = NULL;
1866        }
1867out:
1868        if (tmp_blinded)
1869                bpf_jit_prog_release_other(fp, fp == orig_fp ?
1870                                           tmp : orig_fp);
1871        return fp;
1872}
1873