linux/arch/s390/net/bpf_jit_comp.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * BPF Jit compiler for s390.
   4 *
   5 * Minimum build requirements:
   6 *
   7 *  - HAVE_MARCH_Z196_FEATURES: laal, laalg
   8 *  - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj
   9 *  - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf
  10 *  - PACK_STACK
  11 *  - 64BIT
  12 *
  13 * Copyright IBM Corp. 2012,2015
  14 *
  15 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  16 *            Michael Holzheu <holzheu@linux.vnet.ibm.com>
  17 */
  18
  19#define KMSG_COMPONENT "bpf_jit"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/netdevice.h>
  23#include <linux/filter.h>
  24#include <linux/init.h>
  25#include <linux/bpf.h>
  26#include <linux/mm.h>
  27#include <linux/kernel.h>
  28#include <asm/cacheflush.h>
  29#include <asm/dis.h>
  30#include <asm/facility.h>
  31#include <asm/nospec-branch.h>
  32#include <asm/set_memory.h>
  33#include "bpf_jit.h"
  34
  35struct bpf_jit {
  36        u32 seen;               /* Flags to remember seen eBPF instructions */
  37        u32 seen_reg[16];       /* Array to remember which registers are used */
  38        u32 *addrs;             /* Array with relative instruction addresses */
  39        u8 *prg_buf;            /* Start of program */
  40        int size;               /* Size of program and literal pool */
  41        int size_prg;           /* Size of program */
  42        int prg;                /* Current position in program */
  43        int lit32_start;        /* Start of 32-bit literal pool */
  44        int lit32;              /* Current position in 32-bit literal pool */
  45        int lit64_start;        /* Start of 64-bit literal pool */
  46        int lit64;              /* Current position in 64-bit literal pool */
  47        int base_ip;            /* Base address for literal pool */
  48        int exit_ip;            /* Address of exit */
  49        int r1_thunk_ip;        /* Address of expoline thunk for 'br %r1' */
  50        int r14_thunk_ip;       /* Address of expoline thunk for 'br %r14' */
  51        int tail_call_start;    /* Tail call start offset */
  52        int excnt;              /* Number of exception table entries */
  53};
  54
  55#define SEEN_MEM        BIT(0)          /* use mem[] for temporary storage */
  56#define SEEN_LITERAL    BIT(1)          /* code uses literals */
  57#define SEEN_FUNC       BIT(2)          /* calls C functions */
  58#define SEEN_TAIL_CALL  BIT(3)          /* code uses tail calls */
  59#define SEEN_STACK      (SEEN_FUNC | SEEN_MEM)
  60
  61/*
  62 * s390 registers
  63 */
  64#define REG_W0          (MAX_BPF_JIT_REG + 0)   /* Work register 1 (even) */
  65#define REG_W1          (MAX_BPF_JIT_REG + 1)   /* Work register 2 (odd) */
  66#define REG_L           (MAX_BPF_JIT_REG + 2)   /* Literal pool register */
  67#define REG_15          (MAX_BPF_JIT_REG + 3)   /* Register 15 */
  68#define REG_0           REG_W0                  /* Register 0 */
  69#define REG_1           REG_W1                  /* Register 1 */
  70#define REG_2           BPF_REG_1               /* Register 2 */
  71#define REG_14          BPF_REG_0               /* Register 14 */
  72
  73/*
  74 * Mapping of BPF registers to s390 registers
  75 */
  76static const int reg2hex[] = {
  77        /* Return code */
  78        [BPF_REG_0]     = 14,
  79        /* Function parameters */
  80        [BPF_REG_1]     = 2,
  81        [BPF_REG_2]     = 3,
  82        [BPF_REG_3]     = 4,
  83        [BPF_REG_4]     = 5,
  84        [BPF_REG_5]     = 6,
  85        /* Call saved registers */
  86        [BPF_REG_6]     = 7,
  87        [BPF_REG_7]     = 8,
  88        [BPF_REG_8]     = 9,
  89        [BPF_REG_9]     = 10,
  90        /* BPF stack pointer */
  91        [BPF_REG_FP]    = 13,
  92        /* Register for blinding */
  93        [BPF_REG_AX]    = 12,
  94        /* Work registers for s390x backend */
  95        [REG_W0]        = 0,
  96        [REG_W1]        = 1,
  97        [REG_L]         = 11,
  98        [REG_15]        = 15,
  99};
 100
 101static inline u32 reg(u32 dst_reg, u32 src_reg)
 102{
 103        return reg2hex[dst_reg] << 4 | reg2hex[src_reg];
 104}
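     /*
      * For illustration: reg(BPF_REG_1, BPF_REG_0) yields 0x2e, since BPF r1
      * lives in %r2 and BPF r0 in %r14 (0xe), packed as high and low nibble.
      */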
 105
 106static inline u32 reg_high(u32 reg)
 107{
 108        return reg2hex[reg] << 4;
 109}
 110
 111static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
 112{
 113        u32 r1 = reg2hex[b1];
 114
 115        if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15)
 116                jit->seen_reg[r1] = 1;
 117}
 118
 119#define REG_SET_SEEN(b1)                                        \
 120({                                                              \
 121        reg_set_seen(jit, b1);                                  \
 122})
 123
 124#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]]
 125
 126/*
 127 * EMIT macros for code generation
 128 */
 129
 130#define _EMIT2(op)                                              \
 131({                                                              \
 132        if (jit->prg_buf)                                       \
 133                *(u16 *) (jit->prg_buf + jit->prg) = (op);      \
 134        jit->prg += 2;                                          \
 135})
 136
 137#define EMIT2(op, b1, b2)                                       \
 138({                                                              \
 139        _EMIT2((op) | reg(b1, b2));                             \
 140        REG_SET_SEEN(b1);                                       \
 141        REG_SET_SEEN(b2);                                       \
 142})
 143
 144#define _EMIT4(op)                                              \
 145({                                                              \
 146        if (jit->prg_buf)                                       \
 147                *(u32 *) (jit->prg_buf + jit->prg) = (op);      \
 148        jit->prg += 4;                                          \
 149})
 150
 151#define EMIT4(op, b1, b2)                                       \
 152({                                                              \
 153        _EMIT4((op) | reg(b1, b2));                             \
 154        REG_SET_SEEN(b1);                                       \
 155        REG_SET_SEEN(b2);                                       \
 156})
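     /*
      * Example: EMIT4(0xb9040000, REG_2, BPF_REG_0), the epilogue's
      * "lgr %r2,%b0", resolves to the word 0xb904002e, i.e. lgr %r2,%r14,
      * with the register nibbles filled in by reg().
      */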
 157
 158#define EMIT4_RRF(op, b1, b2, b3)                               \
 159({                                                              \
 160        _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2));         \
 161        REG_SET_SEEN(b1);                                       \
 162        REG_SET_SEEN(b2);                                       \
 163        REG_SET_SEEN(b3);                                       \
 164})
 165
 166#define _EMIT4_DISP(op, disp)                                   \
 167({                                                              \
 168        unsigned int __disp = (disp) & 0xfff;                   \
 169        _EMIT4((op) | __disp);                                  \
 170})
 171
 172#define EMIT4_DISP(op, b1, b2, disp)                            \
 173({                                                              \
 174        _EMIT4_DISP((op) | reg_high(b1) << 16 |                 \
 175                    reg_high(b2) << 8, (disp));                 \
 176        REG_SET_SEEN(b1);                                       \
 177        REG_SET_SEEN(b2);                                       \
 178})
 179
 180#define EMIT4_IMM(op, b1, imm)                                  \
 181({                                                              \
 182        unsigned int __imm = (imm) & 0xffff;                    \
 183        _EMIT4((op) | reg_high(b1) << 16 | __imm);              \
 184        REG_SET_SEEN(b1);                                       \
 185})
 186
 187#define EMIT4_PCREL(op, pcrel)                                  \
 188({                                                              \
 189        long __pcrel = ((pcrel) >> 1) & 0xffff;                 \
 190        _EMIT4((op) | __pcrel);                                 \
 191})
 192
 193#define EMIT4_PCREL_RIC(op, mask, target)                       \
 194({                                                              \
 195        int __rel = ((target) - jit->prg) / 2;                  \
 196        _EMIT4((op) | (mask) << 20 | (__rel & 0xffff));         \
 197})
 198
 199#define _EMIT6(op1, op2)                                        \
 200({                                                              \
 201        if (jit->prg_buf) {                                     \
 202                *(u32 *) (jit->prg_buf + jit->prg) = (op1);     \
 203                *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
 204        }                                                       \
 205        jit->prg += 6;                                          \
 206})
 207
 208#define _EMIT6_DISP(op1, op2, disp)                             \
 209({                                                              \
 210        unsigned int __disp = (disp) & 0xfff;                   \
 211        _EMIT6((op1) | __disp, op2);                            \
 212})
 213
 214#define _EMIT6_DISP_LH(op1, op2, disp)                          \
 215({                                                              \
 216        u32 _disp = (u32) (disp);                               \
 217        unsigned int __disp_h = _disp & 0xff000;                \
 218        unsigned int __disp_l = _disp & 0x00fff;                \
 219        _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4);        \
 220})
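     /*
      * The 20-bit signed displacement of the long-displacement formats is
      * split here: e.g. disp = 0x12345 puts DL = 0x345 into the low 12 bits
      * of op1 and DH = 0x12 into bits 8-15 of op2.
      */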
 221
 222#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp)               \
 223({                                                              \
 224        _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 |              \
 225                       reg_high(b3) << 8, op2, disp);           \
 226        REG_SET_SEEN(b1);                                       \
 227        REG_SET_SEEN(b2);                                       \
 228        REG_SET_SEEN(b3);                                       \
 229})
 230
 231#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target)        \
 232({                                                              \
 233        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 234        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff),      \
 235               (op2) | (mask) << 12);                           \
 236        REG_SET_SEEN(b1);                                       \
 237        REG_SET_SEEN(b2);                                       \
 238})
 239
 240#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target)       \
 241({                                                              \
 242        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 243        _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 |          \
 244                (rel & 0xffff), (op2) | ((imm) & 0xff) << 8);   \
 245        REG_SET_SEEN(b1);                                       \
 246        BUILD_BUG_ON(((unsigned long) (imm)) > 0xff);           \
 247})
 248
 249#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask)             \
 250({                                                              \
 251        /* Branch instruction needs 6 bytes */                  \
 252        int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
 253        _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
 254        REG_SET_SEEN(b1);                                       \
 255        REG_SET_SEEN(b2);                                       \
 256})
 257
 258#define EMIT6_PCREL_RILB(op, b, target)                         \
 259({                                                              \
 260        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 261        _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
 262        REG_SET_SEEN(b);                                        \
 263})
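     /*
      * "rel" is a signed halfword offset ((target - jit->prg) / 2) carried
      * as the 32-bit immediate of the RIL format: the high 16 bits go into
      * op1, the low 16 bits into op2. Used e.g. for larl/lgrl references
      * into the literal pool.
      */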
 264
 265#define EMIT6_PCREL_RIL(op, target)                             \
 266({                                                              \
 267        unsigned int rel = (int)((target) - jit->prg) / 2;      \
 268        _EMIT6((op) | rel >> 16, rel & 0xffff);                 \
 269})
 270
 271#define EMIT6_PCREL_RILC(op, mask, target)                      \
 272({                                                              \
 273        EMIT6_PCREL_RIL((op) | (mask) << 20, (target));         \
 274})
 275
 276#define _EMIT6_IMM(op, imm)                                     \
 277({                                                              \
 278        unsigned int __imm = (imm);                             \
 279        _EMIT6((op) | (__imm >> 16), __imm & 0xffff);           \
 280})
 281
 282#define EMIT6_IMM(op, b1, imm)                                  \
 283({                                                              \
 284        _EMIT6_IMM((op) | reg_high(b1) << 16, imm);             \
 285        REG_SET_SEEN(b1);                                       \
 286})
 287
 288#define _EMIT_CONST_U32(val)                                    \
 289({                                                              \
 290        unsigned int ret;                                       \
 291        ret = jit->lit32;                                       \
 292        if (jit->prg_buf)                                       \
 293                *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
 294        jit->lit32 += 4;                                        \
 295        ret;                                                    \
 296})
 297
 298#define EMIT_CONST_U32(val)                                     \
 299({                                                              \
 300        jit->seen |= SEEN_LITERAL;                              \
 301        _EMIT_CONST_U32(val) - jit->base_ip;                    \
 302})
 303
 304#define _EMIT_CONST_U64(val)                                    \
 305({                                                              \
 306        unsigned int ret;                                       \
 307        ret = jit->lit64;                                       \
 308        if (jit->prg_buf)                                       \
 309                *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
 310        jit->lit64 += 8;                                        \
 311        ret;                                                    \
 312})
 313
 314#define EMIT_CONST_U64(val)                                     \
 315({                                                              \
 316        jit->seen |= SEEN_LITERAL;                              \
 317        _EMIT_CONST_U64(val) - jit->base_ip;                    \
 318})
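     /*
      * Literal pool entries are appended behind the code at lit32/lit64.
      * The EMIT_CONST_* variants return the offset relative to base_ip (for
      * use as a displacement off %l), while _EMIT_CONST_* return the raw
      * program offset (for PC-relative loads such as lgrl).
      */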
 319
 320#define EMIT_ZERO(b1)                                           \
 321({                                                              \
 322        if (!fp->aux->verifier_zext) {                          \
 323                /* llgfr %dst,%dst (zero extend to 64 bit) */   \
 324                EMIT4(0xb9160000, b1, b1);                      \
 325                REG_SET_SEEN(b1);                               \
 326        }                                                       \
 327})
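     /*
      * 32-bit BPF ALU ops must leave the upper 32 bits of the destination
      * zeroed; when the verifier inserts explicit zero-extensions itself
      * (verifier_zext), the llgfr can be omitted.
      */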
 328
 329/*
 330 * Return whether this is the first pass. The first pass is special, since we
 331 * don't know any sizes yet, and thus must be conservative.
 332 */
 333static bool is_first_pass(struct bpf_jit *jit)
 334{
 335        return jit->size == 0;
 336}
 337
 338/*
 339 * Return whether this is the code generation pass. The code generation pass is
 340 * special, since we should change as little as possible.
 341 */
 342static bool is_codegen_pass(struct bpf_jit *jit)
 343{
 344        return jit->prg_buf;
 345}
 346
 347/*
 348 * Return whether "rel" can be encoded as a short PC-relative offset
 349 */
 350static bool is_valid_rel(int rel)
 351{
 352        return rel >= -65536 && rel <= 65534;
 353}
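     /*
      * The short branch and compare-and-branch formats encode the offset as
      * a signed 16-bit halfword count, hence the byte range -65536..65534.
      */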
 354
 355/*
 356 * Return whether "off" can be reached using a short PC-relative offset
 357 */
 358static bool can_use_rel(struct bpf_jit *jit, int off)
 359{
 360        return is_valid_rel(off - jit->prg);
 361}
 362
 363/*
 364 * Return whether given displacement can be encoded using
 365 * Long-Displacement Facility
 366 */
 367static bool is_valid_ldisp(int disp)
 368{
 369        return disp >= -524288 && disp <= 524287;
 370}
 371
 372/*
 373 * Return whether the next 32-bit literal pool entry can be referenced using
 374 * Long-Displacement Facility
 375 */
 376static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
 377{
 378        return is_valid_ldisp(jit->lit32 - jit->base_ip);
 379}
 380
 381/*
 382 * Return whether the next 64-bit literal pool entry can be referenced using
 383 * Long-Displacement Facility
 384 */
 385static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
 386{
 387        return is_valid_ldisp(jit->lit64 - jit->base_ip);
 388}
 389
 390/*
 391 * Fill whole space with illegal instructions
 392 */
 393static void jit_fill_hole(void *area, unsigned int size)
 394{
 395        memset(area, 0, size);
 396}
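     /*
      * An all-zero opcode is not a valid s390 instruction, so zero-filling
      * makes any stray execution of the hole trap immediately.
      */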
 397
 398/*
 399 * Save registers from "rs" (register start) to "re" (register end) on stack
 400 */
 401static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
 402{
 403        u32 off = STK_OFF_R6 + (rs - 6) * 8;
 404
 405        if (rs == re)
 406                /* stg %rs,off(%r15) */
 407                _EMIT6(0xe300f000 | rs << 20 | off, 0x0024);
 408        else
 409                /* stmg %rs,%re,off(%r15) */
 410                _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0024, off);
 411}
 412
 413/*
  414 * Restore registers "rs" (register start) to "re" (register end) from the stack
 415 */
 416static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
 417{
 418        u32 off = STK_OFF_R6 + (rs - 6) * 8;
 419
 420        if (jit->seen & SEEN_STACK)
 421                off += STK_OFF + stack_depth;
 422
 423        if (rs == re)
 424                /* lg %rs,off(%r15) */
 425                _EMIT6(0xe300f000 | rs << 20 | off, 0x0004);
 426        else
 427                /* lmg %rs,%re,off(%r15) */
 428                _EMIT6_DISP(0xeb00f000 | rs << 20 | re << 16, 0x0004, off);
 429}
 430
 431/*
 432 * Return first seen register (from start)
 433 */
 434static int get_start(struct bpf_jit *jit, int start)
 435{
 436        int i;
 437
 438        for (i = start; i <= 15; i++) {
 439                if (jit->seen_reg[i])
 440                        return i;
 441        }
 442        return 0;
 443}
 444
 445/*
 446 * Return last seen register (from start) (gap >= 2)
 447 */
 448static int get_end(struct bpf_jit *jit, int start)
 449{
 450        int i;
 451
 452        for (i = start; i < 15; i++) {
 453                if (!jit->seen_reg[i] && !jit->seen_reg[i + 1])
 454                        return i - 1;
 455        }
 456        return jit->seen_reg[15] ? 15 : 14;
 457}
 458
 459#define REGS_SAVE       1
 460#define REGS_RESTORE    0
 461/*
 462 * Save and restore clobbered registers (6-15) on stack.
 463 * We save/restore registers in chunks with gap >= 2 registers.
 464 */
 465static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
 466{
 467        const int last = 15, save_restore_size = 6;
 468        int re = 6, rs;
 469
 470        if (is_first_pass(jit)) {
 471                /*
 472                 * We don't know yet which registers are used. Reserve space
 473                 * conservatively.
 474                 */
 475                jit->prg += (last - re + 1) * save_restore_size;
 476                return;
 477        }
 478
 479        do {
 480                rs = get_start(jit, re);
 481                if (!rs)
 482                        break;
 483                re = get_end(jit, rs + 1);
 484                if (op == REGS_SAVE)
 485                        save_regs(jit, rs, re);
 486                else
 487                        restore_regs(jit, rs, re, stack_depth);
 488                re++;
 489        } while (re <= last);
 490}
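     /*
      * Example: if only %r6 and %r9 are seen, two single-register stg/lg
      * chunks are emitted; if %r6-%r7 and %r9-%r10 are seen, the
      * one-register gap at %r8 does not split the range and a single
      * stmg/lmg %r6,%r10 covers all of them.
      */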
 491
 492static void bpf_skip(struct bpf_jit *jit, int size)
 493{
 494        if (size >= 6 && !is_valid_rel(size)) {
 495                /* brcl 0xf,size */
 496                EMIT6_PCREL_RIL(0xc0f4000000, size);
 497                size -= 6;
 498        } else if (size >= 4 && is_valid_rel(size)) {
 499                /* brc 0xf,size */
 500                EMIT4_PCREL(0xa7f40000, size);
 501                size -= 4;
 502        }
 503        while (size >= 2) {
 504                /* bcr 0,%0 */
 505                _EMIT2(0x0700);
 506                size -= 2;
 507        }
 508}
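     /*
      * Example: bpf_skip(jit, 6), as used in the prologue, emits a 4-byte
      * "brc 0xf,.+6" followed by a 2-byte "bcr 0,%r0", taking up the same
      * 6 bytes as the xc in the tail-call variant.
      */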
 509
 510/*
 511 * Emit function prologue
 512 *
 513 * Save registers and create stack frame if necessary.
  514 * See stack frame layout description in "bpf_jit.h"!
 515 */
 516static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
 517{
 518        if (jit->seen & SEEN_TAIL_CALL) {
 519                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
 520                _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
 521        } else {
 522                /*
 523                 * There are no tail calls. Insert nops in order to have
 524                 * tail_call_start at a predictable offset.
 525                 */
 526                bpf_skip(jit, 6);
 527        }
 528        /* Tail calls have to skip above initialization */
 529        jit->tail_call_start = jit->prg;
 530        /* Save registers */
 531        save_restore_regs(jit, REGS_SAVE, stack_depth);
 532        /* Setup literal pool */
 533        if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
 534                if (!is_first_pass(jit) &&
 535                    is_valid_ldisp(jit->size - (jit->prg + 2))) {
 536                        /* basr %l,0 */
 537                        EMIT2(0x0d00, REG_L, REG_0);
 538                        jit->base_ip = jit->prg;
 539                } else {
 540                        /* larl %l,lit32_start */
 541                        EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
 542                        jit->base_ip = jit->lit32_start;
 543                }
 544        }
 545        /* Setup stack and backchain */
 546        if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
 547                if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 548                        /* lgr %w1,%r15 (backchain) */
 549                        EMIT4(0xb9040000, REG_W1, REG_15);
 550                /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
 551                EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
 552                /* aghi %r15,-STK_OFF */
 553                EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
 554                if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
 555                        /* stg %w1,152(%r15) (backchain) */
 556                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 557                                      REG_15, 152);
 558        }
 559}
 560
 561/*
 562 * Function epilogue
 563 */
 564static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 565{
 566        jit->exit_ip = jit->prg;
 567        /* Load exit code: lgr %r2,%b0 */
 568        EMIT4(0xb9040000, REG_2, BPF_REG_0);
 569        /* Restore registers */
 570        save_restore_regs(jit, REGS_RESTORE, stack_depth);
 571        if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
 572                jit->r14_thunk_ip = jit->prg;
 573                /* Generate __s390_indirect_jump_r14 thunk */
 574                if (test_facility(35)) {
 575                        /* exrl %r0,.+10 */
 576                        EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
 577                } else {
 578                        /* larl %r1,.+14 */
 579                        EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
 580                        /* ex 0,0(%r1) */
 581                        EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
 582                }
 583                /* j . */
 584                EMIT4_PCREL(0xa7f40000, 0);
 585        }
 586        /* br %r14 */
 587        _EMIT2(0x07fe);
 588
 589        if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
 590            (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
 591                jit->r1_thunk_ip = jit->prg;
 592                /* Generate __s390_indirect_jump_r1 thunk */
 593                if (test_facility(35)) {
 594                        /* exrl %r0,.+10 */
 595                        EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
 596                        /* j . */
 597                        EMIT4_PCREL(0xa7f40000, 0);
 598                        /* br %r1 */
 599                        _EMIT2(0x07f1);
 600                } else {
  601                        /* ex 0,S390_lowcore.br_r1_trampoline */
 602                        EMIT4_DISP(0x44000000, REG_0, REG_0,
 603                                   offsetof(struct lowcore, br_r1_trampoline));
 604                        /* j . */
 605                        EMIT4_PCREL(0xa7f40000, 0);
 606                }
 607        }
 608}
 609
 610static int get_probe_mem_regno(const u8 *insn)
 611{
 612        /*
 613         * insn must point to llgc, llgh, llgf or lg, which have destination
 614         * register at the same position.
 615         */
 616        if (insn[0] != 0xe3) /* common llgc, llgh, llgf and lg prefix */
 617                return -1;
 618        if (insn[5] != 0x90 && /* llgc */
 619            insn[5] != 0x91 && /* llgh */
 620            insn[5] != 0x16 && /* llgf */
 621            insn[5] != 0x04) /* lg */
 622                return -1;
 623        return insn[1] >> 4;
 624}
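     /*
      * Example: for a probe load into %r5 the emitted instruction starts
      * with 0xe3 and carries 0x5 in the high nibble of its second byte
      * (e.g. lg: e3 5x xx xx xx 04), so insn[1] >> 4 returns 5.
      */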
 625
 626static bool ex_handler_bpf(const struct exception_table_entry *x,
 627                           struct pt_regs *regs)
 628{
 629        int regno;
 630        u8 *insn;
 631
 632        regs->psw.addr = extable_fixup(x);
 633        insn = (u8 *)__rewind_psw(regs->psw, regs->int_code >> 16);
 634        regno = get_probe_mem_regno(insn);
 635        if (WARN_ON_ONCE(regno < 0))
 636                /* JIT bug - unexpected instruction. */
 637                return false;
 638        regs->gprs[regno] = 0;
 639        return true;
 640}
 641
 642static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp,
 643                             int probe_prg, int nop_prg)
 644{
 645        struct exception_table_entry *ex;
 646        s64 delta;
 647        u8 *insn;
 648        int prg;
 649        int i;
 650
 651        if (!fp->aux->extable)
 652                /* Do nothing during early JIT passes. */
 653                return 0;
 654        insn = jit->prg_buf + probe_prg;
 655        if (WARN_ON_ONCE(get_probe_mem_regno(insn) < 0))
 656                /* JIT bug - unexpected probe instruction. */
 657                return -1;
 658        if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg))
 659                /* JIT bug - gap between probe and nop instructions. */
 660                return -1;
 661        for (i = 0; i < 2; i++) {
 662                if (WARN_ON_ONCE(jit->excnt >= fp->aux->num_exentries))
 663                        /* Verifier bug - not enough entries. */
 664                        return -1;
 665                ex = &fp->aux->extable[jit->excnt];
 666                /* Add extable entries for probe and nop instructions. */
 667                prg = i == 0 ? probe_prg : nop_prg;
 668                delta = jit->prg_buf + prg - (u8 *)&ex->insn;
 669                if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
 670                        /* JIT bug - code and extable must be close. */
 671                        return -1;
 672                ex->insn = delta;
 673                /*
 674                 * Always land on the nop. Note that extable infrastructure
 675                 * ignores fixup field, it is handled by ex_handler_bpf().
 676                 */
 677                delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup;
 678                if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX))
 679                        /* JIT bug - landing pad and extable must be close. */
 680                        return -1;
 681                ex->fixup = delta;
 682                ex->handler = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
 683                jit->excnt++;
 684        }
 685        return 0;
 686}
 687
 688/*
 689 * Compile one eBPF instruction into s390x code
 690 *
 691 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
 692 * stack space for the large switch statement.
 693 */
 694static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 695                                 int i, bool extra_pass, u32 stack_depth)
 696{
 697        struct bpf_insn *insn = &fp->insnsi[i];
 698        u32 dst_reg = insn->dst_reg;
 699        u32 src_reg = insn->src_reg;
 700        int last, insn_count = 1;
 701        u32 *addrs = jit->addrs;
 702        s32 imm = insn->imm;
 703        s16 off = insn->off;
 704        int probe_prg = -1;
 705        unsigned int mask;
 706        int nop_prg;
 707        int err;
 708
 709        if (BPF_CLASS(insn->code) == BPF_LDX &&
 710            BPF_MODE(insn->code) == BPF_PROBE_MEM)
 711                probe_prg = jit->prg;
 712
 713        switch (insn->code) {
 714        /*
 715         * BPF_MOV
 716         */
 717        case BPF_ALU | BPF_MOV | BPF_X: /* dst = (u32) src */
 718                /* llgfr %dst,%src */
 719                EMIT4(0xb9160000, dst_reg, src_reg);
 720                if (insn_is_zext(&insn[1]))
 721                        insn_count = 2;
 722                break;
 723        case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
 724                /* lgr %dst,%src */
 725                EMIT4(0xb9040000, dst_reg, src_reg);
 726                break;
 727        case BPF_ALU | BPF_MOV | BPF_K: /* dst = (u32) imm */
 728                /* llilf %dst,imm */
 729                EMIT6_IMM(0xc00f0000, dst_reg, imm);
 730                if (insn_is_zext(&insn[1]))
 731                        insn_count = 2;
 732                break;
 733        case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = imm */
 734                /* lgfi %dst,imm */
 735                EMIT6_IMM(0xc0010000, dst_reg, imm);
 736                break;
 737        /*
 738         * BPF_LD 64
 739         */
 740        case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
 741        {
 742                /* 16 byte instruction that uses two 'struct bpf_insn' */
 743                u64 imm64;
 744
 745                imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
 746                /* lgrl %dst,imm */
 747                EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
 748                insn_count = 2;
 749                break;
 750        }
 751        /*
 752         * BPF_ADD
 753         */
 754        case BPF_ALU | BPF_ADD | BPF_X: /* dst = (u32) dst + (u32) src */
 755                /* ar %dst,%src */
 756                EMIT2(0x1a00, dst_reg, src_reg);
 757                EMIT_ZERO(dst_reg);
 758                break;
 759        case BPF_ALU64 | BPF_ADD | BPF_X: /* dst = dst + src */
 760                /* agr %dst,%src */
 761                EMIT4(0xb9080000, dst_reg, src_reg);
 762                break;
 763        case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
 764                if (!imm)
 765                        break;
 766                /* alfi %dst,imm */
 767                EMIT6_IMM(0xc20b0000, dst_reg, imm);
 768                EMIT_ZERO(dst_reg);
 769                break;
 770        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
 771                if (!imm)
 772                        break;
 773                /* agfi %dst,imm */
 774                EMIT6_IMM(0xc2080000, dst_reg, imm);
 775                break;
 776        /*
 777         * BPF_SUB
 778         */
 779        case BPF_ALU | BPF_SUB | BPF_X: /* dst = (u32) dst - (u32) src */
 780                /* sr %dst,%src */
 781                EMIT2(0x1b00, dst_reg, src_reg);
 782                EMIT_ZERO(dst_reg);
 783                break;
 784        case BPF_ALU64 | BPF_SUB | BPF_X: /* dst = dst - src */
 785                /* sgr %dst,%src */
 786                EMIT4(0xb9090000, dst_reg, src_reg);
 787                break;
 788        case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
 789                if (!imm)
 790                        break;
 791                /* alfi %dst,-imm */
 792                EMIT6_IMM(0xc20b0000, dst_reg, -imm);
 793                EMIT_ZERO(dst_reg);
 794                break;
 795        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
 796                if (!imm)
 797                        break;
 798                /* agfi %dst,-imm */
 799                EMIT6_IMM(0xc2080000, dst_reg, -imm);
 800                break;
 801        /*
 802         * BPF_MUL
 803         */
 804        case BPF_ALU | BPF_MUL | BPF_X: /* dst = (u32) dst * (u32) src */
 805                /* msr %dst,%src */
 806                EMIT4(0xb2520000, dst_reg, src_reg);
 807                EMIT_ZERO(dst_reg);
 808                break;
 809        case BPF_ALU64 | BPF_MUL | BPF_X: /* dst = dst * src */
 810                /* msgr %dst,%src */
 811                EMIT4(0xb90c0000, dst_reg, src_reg);
 812                break;
 813        case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
 814                if (imm == 1)
 815                        break;
  816                /* msfi %dst,imm */
 817                EMIT6_IMM(0xc2010000, dst_reg, imm);
 818                EMIT_ZERO(dst_reg);
 819                break;
 820        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
 821                if (imm == 1)
 822                        break;
 823                /* msgfi %dst,imm */
 824                EMIT6_IMM(0xc2000000, dst_reg, imm);
 825                break;
 826        /*
 827         * BPF_DIV / BPF_MOD
 828         */
 829        case BPF_ALU | BPF_DIV | BPF_X: /* dst = (u32) dst / (u32) src */
 830        case BPF_ALU | BPF_MOD | BPF_X: /* dst = (u32) dst % (u32) src */
 831        {
 832                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 833
 834                /* lhi %w0,0 */
 835                EMIT4_IMM(0xa7080000, REG_W0, 0);
 836                /* lr %w1,%dst */
 837                EMIT2(0x1800, REG_W1, dst_reg);
 838                /* dlr %w0,%src */
 839                EMIT4(0xb9970000, REG_W0, src_reg);
 840                /* llgfr %dst,%rc */
 841                EMIT4(0xb9160000, dst_reg, rc_reg);
 842                if (insn_is_zext(&insn[1]))
 843                        insn_count = 2;
 844                break;
 845        }
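             /*
              * Note: dlr (and dlgr below) divide the value in the even/odd
              * register pair %w0:%w1 by the second operand and leave the
              * remainder in the even register (%w0) and the quotient in the
              * odd register (%w1), which is why rc_reg selects %w1 for
              * BPF_DIV and %w0 for BPF_MOD.
              */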
 846        case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
 847        case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
 848        {
 849                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 850
 851                /* lghi %w0,0 */
 852                EMIT4_IMM(0xa7090000, REG_W0, 0);
 853                /* lgr %w1,%dst */
 854                EMIT4(0xb9040000, REG_W1, dst_reg);
  855                /* dlgr %w0,%src */
 856                EMIT4(0xb9870000, REG_W0, src_reg);
 857                /* lgr %dst,%rc */
 858                EMIT4(0xb9040000, dst_reg, rc_reg);
 859                break;
 860        }
 861        case BPF_ALU | BPF_DIV | BPF_K: /* dst = (u32) dst / (u32) imm */
 862        case BPF_ALU | BPF_MOD | BPF_K: /* dst = (u32) dst % (u32) imm */
 863        {
 864                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 865
 866                if (imm == 1) {
 867                        if (BPF_OP(insn->code) == BPF_MOD)
  868                                /* lghi %dst,0 */
 869                                EMIT4_IMM(0xa7090000, dst_reg, 0);
 870                        break;
 871                }
 872                /* lhi %w0,0 */
 873                EMIT4_IMM(0xa7080000, REG_W0, 0);
 874                /* lr %w1,%dst */
 875                EMIT2(0x1800, REG_W1, dst_reg);
 876                if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
 877                        /* dl %w0,<d(imm)>(%l) */
 878                        EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
 879                                      EMIT_CONST_U32(imm));
 880                } else {
 881                        /* lgfrl %dst,imm */
 882                        EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
 883                                         _EMIT_CONST_U32(imm));
 884                        jit->seen |= SEEN_LITERAL;
 885                        /* dlr %w0,%dst */
 886                        EMIT4(0xb9970000, REG_W0, dst_reg);
 887                }
 888                /* llgfr %dst,%rc */
 889                EMIT4(0xb9160000, dst_reg, rc_reg);
 890                if (insn_is_zext(&insn[1]))
 891                        insn_count = 2;
 892                break;
 893        }
 894        case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
 895        case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
 896        {
 897                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 898
 899                if (imm == 1) {
 900                        if (BPF_OP(insn->code) == BPF_MOD)
  901                                /* lghi %dst,0 */
 902                                EMIT4_IMM(0xa7090000, dst_reg, 0);
 903                        break;
 904                }
 905                /* lghi %w0,0 */
 906                EMIT4_IMM(0xa7090000, REG_W0, 0);
 907                /* lgr %w1,%dst */
 908                EMIT4(0xb9040000, REG_W1, dst_reg);
 909                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 910                        /* dlg %w0,<d(imm)>(%l) */
 911                        EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
 912                                      EMIT_CONST_U64(imm));
 913                } else {
 914                        /* lgrl %dst,imm */
 915                        EMIT6_PCREL_RILB(0xc4080000, dst_reg,
 916                                         _EMIT_CONST_U64(imm));
 917                        jit->seen |= SEEN_LITERAL;
 918                        /* dlgr %w0,%dst */
 919                        EMIT4(0xb9870000, REG_W0, dst_reg);
 920                }
 921                /* lgr %dst,%rc */
 922                EMIT4(0xb9040000, dst_reg, rc_reg);
 923                break;
 924        }
 925        /*
 926         * BPF_AND
 927         */
 928        case BPF_ALU | BPF_AND | BPF_X: /* dst = (u32) dst & (u32) src */
 929                /* nr %dst,%src */
 930                EMIT2(0x1400, dst_reg, src_reg);
 931                EMIT_ZERO(dst_reg);
 932                break;
 933        case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
 934                /* ngr %dst,%src */
 935                EMIT4(0xb9800000, dst_reg, src_reg);
 936                break;
 937        case BPF_ALU | BPF_AND | BPF_K: /* dst = (u32) dst & (u32) imm */
 938                /* nilf %dst,imm */
 939                EMIT6_IMM(0xc00b0000, dst_reg, imm);
 940                EMIT_ZERO(dst_reg);
 941                break;
 942        case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
 943                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 944                        /* ng %dst,<d(imm)>(%l) */
 945                        EMIT6_DISP_LH(0xe3000000, 0x0080,
 946                                      dst_reg, REG_0, REG_L,
 947                                      EMIT_CONST_U64(imm));
 948                } else {
 949                        /* lgrl %w0,imm */
 950                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
 951                                         _EMIT_CONST_U64(imm));
 952                        jit->seen |= SEEN_LITERAL;
 953                        /* ngr %dst,%w0 */
 954                        EMIT4(0xb9800000, dst_reg, REG_W0);
 955                }
 956                break;
 957        /*
 958         * BPF_OR
 959         */
 960        case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
 961                /* or %dst,%src */
 962                EMIT2(0x1600, dst_reg, src_reg);
 963                EMIT_ZERO(dst_reg);
 964                break;
 965        case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
 966                /* ogr %dst,%src */
 967                EMIT4(0xb9810000, dst_reg, src_reg);
 968                break;
 969        case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
 970                /* oilf %dst,imm */
 971                EMIT6_IMM(0xc00d0000, dst_reg, imm);
 972                EMIT_ZERO(dst_reg);
 973                break;
 974        case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
 975                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
 976                        /* og %dst,<d(imm)>(%l) */
 977                        EMIT6_DISP_LH(0xe3000000, 0x0081,
 978                                      dst_reg, REG_0, REG_L,
 979                                      EMIT_CONST_U64(imm));
 980                } else {
 981                        /* lgrl %w0,imm */
 982                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
 983                                         _EMIT_CONST_U64(imm));
 984                        jit->seen |= SEEN_LITERAL;
 985                        /* ogr %dst,%w0 */
 986                        EMIT4(0xb9810000, dst_reg, REG_W0);
 987                }
 988                break;
 989        /*
 990         * BPF_XOR
 991         */
 992        case BPF_ALU | BPF_XOR | BPF_X: /* dst = (u32) dst ^ (u32) src */
 993                /* xr %dst,%src */
 994                EMIT2(0x1700, dst_reg, src_reg);
 995                EMIT_ZERO(dst_reg);
 996                break;
 997        case BPF_ALU64 | BPF_XOR | BPF_X: /* dst = dst ^ src */
 998                /* xgr %dst,%src */
 999                EMIT4(0xb9820000, dst_reg, src_reg);
1000                break;
1001        case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
1002                if (!imm)
1003                        break;
1004                /* xilf %dst,imm */
1005                EMIT6_IMM(0xc0070000, dst_reg, imm);
1006                EMIT_ZERO(dst_reg);
1007                break;
1008        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
1009                if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
1010                        /* xg %dst,<d(imm)>(%l) */
1011                        EMIT6_DISP_LH(0xe3000000, 0x0082,
1012                                      dst_reg, REG_0, REG_L,
1013                                      EMIT_CONST_U64(imm));
1014                } else {
1015                        /* lgrl %w0,imm */
1016                        EMIT6_PCREL_RILB(0xc4080000, REG_W0,
1017                                         _EMIT_CONST_U64(imm));
1018                        jit->seen |= SEEN_LITERAL;
1019                        /* xgr %dst,%w0 */
1020                        EMIT4(0xb9820000, dst_reg, REG_W0);
1021                }
1022                break;
1023        /*
1024         * BPF_LSH
1025         */
1026        case BPF_ALU | BPF_LSH | BPF_X: /* dst = (u32) dst << (u32) src */
1027                /* sll %dst,0(%src) */
1028                EMIT4_DISP(0x89000000, dst_reg, src_reg, 0);
1029                EMIT_ZERO(dst_reg);
1030                break;
1031        case BPF_ALU64 | BPF_LSH | BPF_X: /* dst = dst << src */
1032                /* sllg %dst,%dst,0(%src) */
1033                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
1034                break;
1035        case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
1036                if (imm == 0)
1037                        break;
1038                /* sll %dst,imm(%r0) */
1039                EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
1040                EMIT_ZERO(dst_reg);
1041                break;
1042        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
1043                if (imm == 0)
1044                        break;
1045                /* sllg %dst,%dst,imm(%r0) */
1046                EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, REG_0, imm);
1047                break;
1048        /*
1049         * BPF_RSH
1050         */
1051        case BPF_ALU | BPF_RSH | BPF_X: /* dst = (u32) dst >> (u32) src */
1052                /* srl %dst,0(%src) */
1053                EMIT4_DISP(0x88000000, dst_reg, src_reg, 0);
1054                EMIT_ZERO(dst_reg);
1055                break;
1056        case BPF_ALU64 | BPF_RSH | BPF_X: /* dst = dst >> src */
1057                /* srlg %dst,%dst,0(%src) */
1058                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
1059                break;
1060        case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
1061                if (imm == 0)
1062                        break;
1063                /* srl %dst,imm(%r0) */
1064                EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
1065                EMIT_ZERO(dst_reg);
1066                break;
1067        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
1068                if (imm == 0)
1069                        break;
1070                /* srlg %dst,%dst,imm(%r0) */
1071                EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, REG_0, imm);
1072                break;
1073        /*
1074         * BPF_ARSH
1075         */
1076        case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
 1077                /* sra %dst,0(%src) */
1078                EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
1079                EMIT_ZERO(dst_reg);
1080                break;
1081        case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
1082                /* srag %dst,%dst,0(%src) */
1083                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
1084                break;
 1085        case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
1086                if (imm == 0)
1087                        break;
1088                /* sra %dst,imm(%r0) */
1089                EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
1090                EMIT_ZERO(dst_reg);
1091                break;
1092        case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
1093                if (imm == 0)
1094                        break;
1095                /* srag %dst,%dst,imm(%r0) */
1096                EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, REG_0, imm);
1097                break;
1098        /*
1099         * BPF_NEG
1100         */
1101        case BPF_ALU | BPF_NEG: /* dst = (u32) -dst */
1102                /* lcr %dst,%dst */
1103                EMIT2(0x1300, dst_reg, dst_reg);
1104                EMIT_ZERO(dst_reg);
1105                break;
1106        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
1107                /* lcgr %dst,%dst */
1108                EMIT4(0xb9030000, dst_reg, dst_reg);
1109                break;
1110        /*
1111         * BPF_FROM_BE/LE
1112         */
1113        case BPF_ALU | BPF_END | BPF_FROM_BE:
1114                /* s390 is big endian, therefore only clear high order bytes */
1115                switch (imm) {
1116                case 16: /* dst = (u16) cpu_to_be16(dst) */
1117                        /* llghr %dst,%dst */
1118                        EMIT4(0xb9850000, dst_reg, dst_reg);
1119                        if (insn_is_zext(&insn[1]))
1120                                insn_count = 2;
1121                        break;
1122                case 32: /* dst = (u32) cpu_to_be32(dst) */
1123                        if (!fp->aux->verifier_zext)
1124                                /* llgfr %dst,%dst */
1125                                EMIT4(0xb9160000, dst_reg, dst_reg);
1126                        break;
1127                case 64: /* dst = (u64) cpu_to_be64(dst) */
1128                        break;
1129                }
1130                break;
1131        case BPF_ALU | BPF_END | BPF_FROM_LE:
1132                switch (imm) {
1133                case 16: /* dst = (u16) cpu_to_le16(dst) */
1134                        /* lrvr %dst,%dst */
1135                        EMIT4(0xb91f0000, dst_reg, dst_reg);
1136                        /* srl %dst,16(%r0) */
1137                        EMIT4_DISP(0x88000000, dst_reg, REG_0, 16);
1138                        /* llghr %dst,%dst */
1139                        EMIT4(0xb9850000, dst_reg, dst_reg);
1140                        if (insn_is_zext(&insn[1]))
1141                                insn_count = 2;
1142                        break;
1143                case 32: /* dst = (u32) cpu_to_le32(dst) */
1144                        /* lrvr %dst,%dst */
1145                        EMIT4(0xb91f0000, dst_reg, dst_reg);
1146                        if (!fp->aux->verifier_zext)
1147                                /* llgfr %dst,%dst */
1148                                EMIT4(0xb9160000, dst_reg, dst_reg);
1149                        break;
1150                case 64: /* dst = (u64) cpu_to_le64(dst) */
1151                        /* lrvgr %dst,%dst */
1152                        EMIT4(0xb90f0000, dst_reg, dst_reg);
1153                        break;
1154                }
1155                break;
1156        /*
1157         * BPF_ST(X)
1158         */
1159        case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */
1160                /* stcy %src,off(%dst) */
1161                EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off);
1162                jit->seen |= SEEN_MEM;
1163                break;
 1164        case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
1165                /* sthy %src,off(%dst) */
1166                EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off);
1167                jit->seen |= SEEN_MEM;
1168                break;
1169        case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
1170                /* sty %src,off(%dst) */
1171                EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off);
1172                jit->seen |= SEEN_MEM;
1173                break;
 1174        case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
1175                /* stg %src,off(%dst) */
1176                EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off);
1177                jit->seen |= SEEN_MEM;
1178                break;
1179        case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
1180                /* lhi %w0,imm */
1181                EMIT4_IMM(0xa7080000, REG_W0, (u8) imm);
 1182                /* stcy %w0,off(%dst) */
1183                EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off);
1184                jit->seen |= SEEN_MEM;
1185                break;
 1186        case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
1187                /* lhi %w0,imm */
1188                EMIT4_IMM(0xa7080000, REG_W0, (u16) imm);
 1189                /* sthy %w0,off(%dst) */
1190                EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off);
1191                jit->seen |= SEEN_MEM;
1192                break;
1193        case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
1194                /* llilf %w0,imm  */
1195                EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm);
1196                /* sty %w0,off(%dst) */
1197                EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off);
1198                jit->seen |= SEEN_MEM;
1199                break;
1200        case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
1201                /* lgfi %w0,imm */
1202                EMIT6_IMM(0xc0010000, REG_W0, imm);
1203                /* stg %w0,off(%dst) */
1204                EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off);
1205                jit->seen |= SEEN_MEM;
1206                break;
1207        /*
1208         * BPF_STX XADD (atomic_add)
1209         */
1210        case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
1211                /* laal %w0,%src,off(%dst) */
1212                EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
1213                              dst_reg, off);
1214                jit->seen |= SEEN_MEM;
1215                break;
1216        case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
1217                /* laalg %w0,%src,off(%dst) */
1218                EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
1219                              dst_reg, off);
1220                jit->seen |= SEEN_MEM;
1221                break;
1222        /*
1223         * BPF_LDX
1224         */
1225        case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
1226        case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1227                /* llgc %dst,0(off,%src) */
1228                EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off);
1229                jit->seen |= SEEN_MEM;
1230                if (insn_is_zext(&insn[1]))
1231                        insn_count = 2;
1232                break;
1233        case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
1234        case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1235                /* llgh %dst,0(off,%src) */
1236                EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off);
1237                jit->seen |= SEEN_MEM;
1238                if (insn_is_zext(&insn[1]))
1239                        insn_count = 2;
1240                break;
1241        case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
1242        case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1243                /* llgf %dst,off(%src) */
1244                jit->seen |= SEEN_MEM;
1245                EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off);
1246                if (insn_is_zext(&insn[1]))
1247                        insn_count = 2;
1248                break;
1249        case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
1250        case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1251                /* lg %dst,0(off,%src) */
1252                jit->seen |= SEEN_MEM;
1253                EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off);
1254                break;
1255        /*
1256         * BPF_JMP / CALL
1257         */
1258        case BPF_JMP | BPF_CALL:
1259        {
1260                u64 func;
1261                bool func_addr_fixed;
1262                int ret;
1263
1264                ret = bpf_jit_get_func_addr(fp, insn, extra_pass,
1265                                            &func, &func_addr_fixed);
1266                if (ret < 0)
1267                        return -1;
1268
1269                REG_SET_SEEN(BPF_REG_5);
1270                jit->seen |= SEEN_FUNC;
1271                /* lgrl %w1,func */
1272                EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
1273                if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
1274                        /* brasl %r14,__s390_indirect_jump_r1 */
1275                        EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
1276                } else {
1277                        /* basr %r14,%w1 */
1278                        EMIT2(0x0d00, REG_14, REG_W1);
1279                }
1280                /* lgr %b0,%r2: load return value into %b0 */
1281                EMIT4(0xb9040000, BPF_REG_0, REG_2);
1282                break;
1283        }
1284        case BPF_JMP | BPF_TAIL_CALL: {
1285                int patch_1_clrj, patch_2_clij, patch_3_brc;
1286
1287                /*
1288                 * Implicit input:
1289                 *  B1: pointer to ctx
1290                 *  B2: pointer to bpf_array
1291                 *  B3: index in bpf_array
1292                 */
1293                jit->seen |= SEEN_TAIL_CALL;
1294
1295                /*
1296                 * if (index >= array->map.max_entries)
1297                 *         goto out;
1298                 */
1299
1300                /* llgf %w1,map.max_entries(%b2) */
1301                EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1302                              offsetof(struct bpf_array, map.max_entries));
1303                /* if ((u32)%b3 >= (u32)%w1) goto out; */
1304                /* clrj %b3,%w1,0xa,out */
1305                patch_1_clrj = jit->prg;
1306                EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
1307                                 jit->prg);
1308
1309                /*
1310                 * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
1311                 *         goto out;
1312                 */
1313
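                    /*
                     * The tail call counter sits at STK_OFF_TCCNT relative to
                     * the %r15 value at program entry; if this program lowered
                     * %r15 to allocate a frame of its own (SEEN_STACK), the
                     * slot has to be addressed through that frame, hence the
                     * extra STK_OFF + stack_depth.
                     */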
1314                if (jit->seen & SEEN_STACK)
1315                        off = STK_OFF_TCCNT + STK_OFF + stack_depth;
1316                else
1317                        off = STK_OFF_TCCNT;
1318                /* lhi %w0,1 */
1319                EMIT4_IMM(0xa7080000, REG_W0, 1);
1320                /* laal %w1,%w0,off(%r15) */
1321                EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
1322                /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
1323                patch_2_clij = jit->prg;
1324                EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
1325                                 2, jit->prg);
1326
1327                /*
1328                 * prog = array->ptrs[index];
1329                 * if (prog == NULL)
1330                 *         goto out;
1331                 */
1332
1333                /* llgfr %r1,%b3: %r1 = (u32) index */
1334                EMIT4(0xb9160000, REG_1, BPF_REG_3);
1335                /* sllg %r1,%r1,3: %r1 *= 8 */
1336                EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1337                /* ltg %r1,prog(%b2,%r1) */
1338                EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
1339                              REG_1, offsetof(struct bpf_array, ptrs));
1340                /* brc 0x8,out */
1341                patch_3_brc = jit->prg;
1342                EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
1343
1344                /*
1345                 * Restore registers before calling function
1346                 */
1347                save_restore_regs(jit, REGS_RESTORE, stack_depth);
1348
1349                /*
1350                 * goto *(prog->bpf_func + tail_call_start);
1351                 */
1352
1353                /* lg %r1,bpf_func(%r1) */
1354                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
1355                              offsetof(struct bpf_prog, bpf_func));
1356                /* bc 0xf,tail_call_start(%r1) */
1357                _EMIT4(0x47f01000 + jit->tail_call_start);
1358                /* out: */
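                    /*
                     * Backpatch the three forward branches now that the offset
                     * of "out" (jit->prg) is known: in the RI/RIE formats used
                     * above the 16-bit relative target occupies bytes 2-3 of
                     * the instruction and is counted in halfwords, hence the
                     * "+ 2" and ">> 1" below.
                     */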
1359                if (jit->prg_buf) {
1360                        *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
1361                                (jit->prg - patch_1_clrj) >> 1;
1362                        *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
1363                                (jit->prg - patch_2_clij) >> 1;
1364                        *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
1365                                (jit->prg - patch_3_brc) >> 1;
1366                }
1367                break;
1368        }
1369        case BPF_JMP | BPF_EXIT: /* return b0 */
1370                last = (i == fp->len - 1) ? 1 : 0;
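                    /*
                     * For the last instruction the epilogue follows directly,
                     * so the branch to exit_ip can be omitted.
                     */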
1371                if (last)
1372                        break;
1373                if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
1374                        /* brc 0xf, <exit> */
1375                        EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
1376                else
1377                        /* brcl 0xf, <exit> */
1378                        EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
1379                break;
1380        /*
1381         * Branch relative (number of skipped instructions) to offset on
1382         * condition.
1383         *
1384         * Condition code to mask mapping:
1385         *
1386         * CC | Description        | Mask
1387         * ------------------------------
1388         * 0  | Operands equal     |    8
1389         * 1  | First operand low  |    4
1390         * 2  | First operand high |    2
1391         * 3  | Unused             |    1
1392         *
1393         * For s390x relative branches: ip = ip + off_bytes
1394         * For BPF relative branches:   insn = insn + off_insns + 1
1395         *
1396         * For example, with offset 0 an s390x branch jumps to the
1397         * branch instruction itself (a loop), whereas a BPF branch
1398         * with offset 0 targets the instruction following the branch.
1399         */
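            /*
             * Worked example (numbers are illustrative only): a conditional
             * jump at BPF index i = 10 with off = 2 targets BPF instruction
             * 10 + 2 + 1 = 13, so the generated s390 branch aims at addrs[13],
             * the program offset recorded for that instruction; the
             * EMIT*_PCREL* helpers turn it into a halfword displacement
             * relative to the branch being emitted.
             */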
1400        case BPF_JMP | BPF_JA: /* if (true) */
1401                mask = 0xf000; /* j */
1402                goto branch_oc;
1403        case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
1404        case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */
1405                mask = 0x2000; /* jh */
1406                goto branch_ks;
1407        case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
1408        case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */
1409                mask = 0x4000; /* jl */
1410                goto branch_ks;
1411        case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
1412        case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */
1413                mask = 0xa000; /* jhe */
1414                goto branch_ks;
1415        case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
1416        case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */
1417                mask = 0xc000; /* jle */
1418                goto branch_ks;
1419        case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
1420        case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */
1421                mask = 0x2000; /* jh */
1422                goto branch_ku;
1423        case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
1424        case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */
1425                mask = 0x4000; /* jl */
1426                goto branch_ku;
1427        case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
1428        case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */
1429                mask = 0xa000; /* jhe */
1430                goto branch_ku;
1431        case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
1432        case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */
1433                mask = 0xc000; /* jle */
1434                goto branch_ku;
1435        case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
1436        case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */
1437                mask = 0x7000; /* jne */
1438                goto branch_ku;
1439        case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */
1440        case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */
1441                mask = 0x8000; /* je */
1442                goto branch_ku;
1443        case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */
1444        case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */
1445                mask = 0x7000; /* jnz */
1446                if (BPF_CLASS(insn->code) == BPF_JMP32) {
1447                        /* llilf %w1,imm (load zero extend imm) */
1448                        EMIT6_IMM(0xc00f0000, REG_W1, imm);
1449                        /* nr %w1,%dst */
1450                        EMIT2(0x1400, REG_W1, dst_reg);
1451                } else {
1452                        /* lgfi %w1,imm (load sign extend imm) */
1453                        EMIT6_IMM(0xc0010000, REG_W1, imm);
1454                        /* ngr %w1,%dst */
1455                        EMIT4(0xb9800000, REG_W1, dst_reg);
1456                }
1457                goto branch_oc;
1458
1459        case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
1460        case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */
1461                mask = 0x2000; /* jh */
1462                goto branch_xs;
1463        case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
1464        case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */
1465                mask = 0x4000; /* jl */
1466                goto branch_xs;
1467        case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
1468        case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */
1469                mask = 0xa000; /* jhe */
1470                goto branch_xs;
1471        case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
1472        case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */
1473                mask = 0xc000; /* jle */
1474                goto branch_xs;
1475        case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
1476        case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */
1477                mask = 0x2000; /* jh */
1478                goto branch_xu;
1479        case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
1480        case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */
1481                mask = 0x4000; /* jl */
1482                goto branch_xu;
1483        case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
1484        case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */
1485                mask = 0xa000; /* jhe */
1486                goto branch_xu;
1487        case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
1488        case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */
1489                mask = 0xc000; /* jle */
1490                goto branch_xu;
1491        case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
1492        case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */
1493                mask = 0x7000; /* jne */
1494                goto branch_xu;
1495        case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */
1496        case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */
1497                mask = 0x8000; /* je */
1498                goto branch_xu;
1499        case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */
1500        case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */
1501        {
1502                bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1503
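                    /*
                     * The branch_ks/branch_ku/branch_xs/branch_xu/branch_oc
                     * labels below are entered via goto from the BPF_K and
                     * BPF_X cases above; they sit inside this block so that
                     * the shared is_jmp32 local stays in scope.
                     */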
1504                mask = 0x7000; /* jnz */
1505                /* nrk or ngrk %w1,%dst,%src */
1506                EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000),
1507                          REG_W1, dst_reg, src_reg);
1508                goto branch_oc;
1509branch_ks:
1510                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1511                /* cfi or cgfi %dst,imm */
1512                EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
1513                          dst_reg, imm);
1514                if (!is_first_pass(jit) &&
1515                    can_use_rel(jit, addrs[i + off + 1])) {
1516                        /* brc mask,off */
1517                        EMIT4_PCREL_RIC(0xa7040000,
1518                                        mask >> 12, addrs[i + off + 1]);
1519                } else {
1520                        /* brcl mask,off */
1521                        EMIT6_PCREL_RILC(0xc0040000,
1522                                         mask >> 12, addrs[i + off + 1]);
1523                }
1524                break;
1525branch_ku:
1526                /* lgfi %w1,imm (load sign extend imm) */
1527                src_reg = REG_1;
1528                EMIT6_IMM(0xc0010000, src_reg, imm);
1529                goto branch_xu;
1530branch_xs:
1531                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1532                if (!is_first_pass(jit) &&
1533                    can_use_rel(jit, addrs[i + off + 1])) {
1534                        /* crj or cgrj %dst,%src,mask,off */
1535                        EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
1536                                    dst_reg, src_reg, i, off, mask);
1537                } else {
1538                        /* cr or cgr %dst,%src */
1539                        if (is_jmp32)
1540                                EMIT2(0x1900, dst_reg, src_reg);
1541                        else
1542                                EMIT4(0xb9200000, dst_reg, src_reg);
1543                        /* brcl mask,off */
1544                        EMIT6_PCREL_RILC(0xc0040000,
1545                                         mask >> 12, addrs[i + off + 1]);
1546                }
1547                break;
1548branch_xu:
1549                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1550                if (!is_first_pass(jit) &&
1551                    can_use_rel(jit, addrs[i + off + 1])) {
1552                        /* clrj or clgrj %dst,%src,mask,off */
1553                        EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
1554                                    dst_reg, src_reg, i, off, mask);
1555                } else {
1556                        /* clr or clgr %dst,%src */
1557                        if (is_jmp32)
1558                                EMIT2(0x1500, dst_reg, src_reg);
1559                        else
1560                                EMIT4(0xb9210000, dst_reg, src_reg);
1561                        /* brcl mask,off */
1562                        EMIT6_PCREL_RILC(0xc0040000,
1563                                         mask >> 12, addrs[i + off + 1]);
1564                }
1565                break;
1566branch_oc:
1567                if (!is_first_pass(jit) &&
1568                    can_use_rel(jit, addrs[i + off + 1])) {
1569                        /* brc mask,off */
1570                        EMIT4_PCREL_RIC(0xa7040000,
1571                                        mask >> 12, addrs[i + off + 1]);
1572                } else {
1573                        /* brcl mask,off */
1574                        EMIT6_PCREL_RILC(0xc0040000,
1575                                         mask >> 12, addrs[i + off + 1]);
1576                }
1577                break;
1578        }
1579        default: /* too complex, give up */
1580                pr_err("Unknown opcode %02x\n", insn->code);
1581                return -1;
1582        }
1583
1584        if (probe_prg != -1) {
1585                /*
1586                 * Handlers of certain exceptions leave psw.addr pointing to
1587                 * the instruction directly after the failing one. Therefore,
1588                 * create two exception table entries and also add a nop in
1589                 * case two probing instructions come directly after each
1590                 * other.
1591                 */
1592                nop_prg = jit->prg;
1593                /* bcr 0,%r0 */
1594                _EMIT2(0x0700);
1595                err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg);
1596                if (err < 0)
1597                        return err;
1598        }
1599
1600        return insn_count;
1601}
1602
1603/*
1604 * Return whether the new address of the i-th instruction satisfies all invariants
1605 */
1606static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
1607{
1608        /* On the first pass anything goes */
1609        if (is_first_pass(jit))
1610                return true;
1611
1612        /* The codegen pass must not change anything */
1613        if (is_codegen_pass(jit))
1614                return jit->addrs[i] == jit->prg;
1615
1616        /* Passes in between must not increase code size */
1617        return jit->addrs[i] >= jit->prg;
1618}
1619
1620/*
1621 * Update the address of the i-th instruction
1622 */
1623static int bpf_set_addr(struct bpf_jit *jit, int i)
1624{
1625        int delta;
1626
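            /*
             * Example (illustrative numbers): if a previous pass recorded
             * addrs[i] == 96 but the codegen pass reaches this point at
             * jit->prg == 92, e.g. because a shorter branch encoding became
             * usable, bpf_skip() emits 4 bytes of padding so that all
             * previously recorded addresses stay valid.
             */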
1627        if (is_codegen_pass(jit)) {
1628                delta = jit->prg - jit->addrs[i];
1629                if (delta < 0)
1630                        bpf_skip(jit, -delta);
1631        }
1632        if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
1633                return -1;
1634        jit->addrs[i] = jit->prg;
1635        return 0;
1636}
1637
1638/*
1639 * Compile eBPF program into s390x code
1640 */
1641static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
1642                        bool extra_pass, u32 stack_depth)
1643{
1644        int i, insn_count, lit32_size, lit64_size;
1645
1646        jit->lit32 = jit->lit32_start;
1647        jit->lit64 = jit->lit64_start;
1648        jit->prg = 0;
1649        jit->excnt = 0;
1650
1651        bpf_jit_prologue(jit, stack_depth);
1652        if (bpf_set_addr(jit, 0) < 0)
1653                return -1;
1654        for (i = 0; i < fp->len; i += insn_count) {
1655                insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
1656                if (insn_count < 0)
1657                        return -1;
1658                /* Next instruction address */
1659                if (bpf_set_addr(jit, i + insn_count) < 0)
1660                        return -1;
1661        }
1662        bpf_jit_epilogue(jit, stack_depth);
1663
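            /*
             * Layout example (illustrative numbers): with jit->prg = 122,
             * lit32_size = 4 and lit64_size = 16 the code below yields
             * lit32_start = ALIGN(122, 4) = 124, lit64_start = ALIGN(128, 8)
             * = 128 and size = 144: both literal pools are placed right
             * behind the code, each on its natural alignment.
             */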
1664        lit32_size = jit->lit32 - jit->lit32_start;
1665        lit64_size = jit->lit64 - jit->lit64_start;
1666        jit->lit32_start = jit->prg;
1667        if (lit32_size)
1668                jit->lit32_start = ALIGN(jit->lit32_start, 4);
1669        jit->lit64_start = jit->lit32_start + lit32_size;
1670        if (lit64_size)
1671                jit->lit64_start = ALIGN(jit->lit64_start, 8);
1672        jit->size = jit->lit64_start + lit64_size;
1673        jit->size_prg = jit->prg;
1674
1675        if (WARN_ON_ONCE(fp->aux->extable &&
1676                         jit->excnt != fp->aux->num_exentries))
1677                /* Verifier bug - wrong number of exception table entries. */
1678                return -1;
1679
1680        return 0;
1681}
1682
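    /*
     * Ask the verifier to materialize zero-extensions of 32-bit subregister
     * writes as explicit instructions; where the s390 instruction chosen by
     * the JIT already zero-extends (e.g. llgc/llgh/llgf above), the
     * insn_is_zext(&insn[1]) checks fold that explicit zext away again.
     */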
1683bool bpf_jit_needs_zext(void)
1684{
1685        return true;
1686}
1687
1688struct s390_jit_data {
1689        struct bpf_binary_header *header;
1690        struct bpf_jit ctx;
1691        int pass;
1692};
1693
1694static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
1695                                               struct bpf_prog *fp)
1696{
1697        struct bpf_binary_header *header;
1698        u32 extable_size;
1699        u32 code_size;
1700
1701        /* We need two exception table entries per probing insn. */
1702        fp->aux->num_exentries *= 2;
1703
1704        code_size = roundup(jit->size,
1705                            __alignof__(struct exception_table_entry));
1706        extable_size = fp->aux->num_exentries *
1707                sizeof(struct exception_table_entry);
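            /*
             * The exception table is placed directly behind the JITed code in
             * the same allocation, so the code size is rounded up to the
             * table's alignment and both sizes are passed to
             * bpf_jit_binary_alloc() together.
             */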
1708        header = bpf_jit_binary_alloc(code_size + extable_size, &jit->prg_buf,
1709                                      8, jit_fill_hole);
1710        if (!header)
1711                return NULL;
1712        fp->aux->extable = (struct exception_table_entry *)
1713                (jit->prg_buf + code_size);
1714        return header;
1715}
1716
1717/*
1718 * Compile eBPF program "fp"
1719 */
1720struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1721{
1722        u32 stack_depth = round_up(fp->aux->stack_depth, 8);
1723        struct bpf_prog *tmp, *orig_fp = fp;
1724        struct bpf_binary_header *header;
1725        struct s390_jit_data *jit_data;
1726        bool tmp_blinded = false;
1727        bool extra_pass = false;
1728        struct bpf_jit jit;
1729        int pass;
1730
1731        if (!fp->jit_requested)
1732                return orig_fp;
1733
1734        tmp = bpf_jit_blind_constants(fp);
1735        /*
1736         * If blinding was requested and we failed during blinding,
1737         * we must fall back to the interpreter.
1738         */
1739        if (IS_ERR(tmp))
1740                return orig_fp;
1741        if (tmp != fp) {
1742                tmp_blinded = true;
1743                fp = tmp;
1744        }
1745
1746        jit_data = fp->aux->jit_data;
1747        if (!jit_data) {
1748                jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1749                if (!jit_data) {
1750                        fp = orig_fp;
1751                        goto out;
1752                }
1753                fp->aux->jit_data = jit_data;
1754        }
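            /*
             * A saved context means this is a re-entry for a program with
             * BPF-to-BPF calls: once the addresses of all subprograms are
             * known, an extra pass regenerates the code with the final call
             * targets instead of starting from scratch.
             */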
1755        if (jit_data->ctx.addrs) {
1756                jit = jit_data->ctx;
1757                header = jit_data->header;
1758                extra_pass = true;
1759                pass = jit_data->pass + 1;
1760                goto skip_init_ctx;
1761        }
1762
1763        memset(&jit, 0, sizeof(jit));
1764        jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
1765        if (jit.addrs == NULL) {
1766                fp = orig_fp;
1767                goto out;
1768        }
1769        /*
1770         * Three initial passes:
1771         *   - 1/2: Determine clobbered registers
1772         *   - 3:   Calculate program size and addrs array
1773         */
1774        for (pass = 1; pass <= 3; pass++) {
1775                if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
1776                        fp = orig_fp;
1777                        goto free_addrs;
1778                }
1779        }
1780        /*
1781         * Final pass: Allocate and generate program
1782         */
1783        header = bpf_jit_alloc(&jit, fp);
1784        if (!header) {
1785                fp = orig_fp;
1786                goto free_addrs;
1787        }
1788skip_init_ctx:
1789        if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
1790                bpf_jit_binary_free(header);
1791                fp = orig_fp;
1792                goto free_addrs;
1793        }
1794        if (bpf_jit_enable > 1) {
1795                bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
1796                print_fn_code(jit.prg_buf, jit.size_prg);
1797        }
1798        if (!fp->is_func || extra_pass) {
1799                bpf_jit_binary_lock_ro(header);
1800        } else {
1801                jit_data->header = header;
1802                jit_data->ctx = jit;
1803                jit_data->pass = pass;
1804        }
1805        fp->bpf_func = (void *) jit.prg_buf;
1806        fp->jited = 1;
1807        fp->jited_len = jit.size;
1808
1809        if (!fp->is_func || extra_pass) {
1810                bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
1811free_addrs:
1812                kvfree(jit.addrs);
1813                kfree(jit_data);
1814                fp->aux->jit_data = NULL;
1815        }
1816out:
1817        if (tmp_blinded)
1818                bpf_jit_prog_release_other(fp, fp == orig_fp ?
1819                                           tmp : orig_fp);
1820        return fp;
1821}
1822