linux/arch/powerpc/net/bpf_jit_comp.c
/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

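/*
 * Order the stores that wrote the JITed image before the flush, then
 * invalidate the icache over the whole range so no CPU can execute stale
 * instructions from the freshly written buffer.
 */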
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

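	/*
	 * Non-volatile GPRs are saved at the conventional offsets,
	 * -(REG_SZ * (32 - reg)) from the entry-time stack pointer (so r31
	 * sits just below r1), and PPC_BPF_STLU then creates the frame with
	 * a single atomic update of r1.
	 */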
	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to userspace */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

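/*
 * Pick a load helper from K alone: non-negative offsets take the fast
 * positive-offset path, negative offsets within the linklayer range
 * (K >= SKF_LL_OFF) take the negative-offset path, and anything more
 * negative falls back to the generic helper, which validates at runtime.
 */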
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);
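		/*
		 * bpf_anc_helper() maps ancillary loads (classic BPF LD|ABS
		 * with a magic negative offset) onto BPF_ANC pseudo-opcodes,
		 * so the switch below can match them as ordinary cases.
		 */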

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
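		/*
		 * For ALU ops with a 32-bit immediate, the constant is applied
		 * as addi of the low 16 bits (sign-extended) plus, if needed,
		 * addis of IMM_HA(K), whose +1 adjustment pre-compensates for
		 * that sign extension.  E.g. K = 0x00018000: addi adds -0x8000
		 * and addis adds 2 to the high half (+0x20000), a net +0x18000.
		 */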
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
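				/*
				 * No RET 0 seen yet, so build a local one: the
				 * short branch skips the two-instruction
				 * li/jmp pair below (its target is the current
				 * address + 12 bytes) when X is non-zero.
				 */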
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
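			/*
			 * This era of the ISA has no integer remainder
			 * instruction (modsw/moduw only arrive with ISA
			 * v3.0), so A % X is computed as A - (A / X) * X.
			 */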
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MUL(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue when we have state to clean up;
			 * otherwise, with nothing to tidy, just return.  If
			 * we /are/ the last instruction, we're about to fall
			 * through to the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
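		/*
		 * M[] is kept entirely in non-volatile GPRs, one per slot at
		 * r_M + K; the low bits of ctx->seen track which slots are
		 * touched so the prologue/epilogue save and restore only
		 * those registers.
		 */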
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /*	A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
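			/*
			 * vlan_tci carries VLAN_TAG_PRESENT (bit 0x1000, per
			 * the BUILD_BUG_ON above) alongside the tag itself:
			 * mask the flag off for SKF_AD_VLAN_TAG, or isolate
			 * it and shift it down to 0/1 for
			 * SKF_AD_VLAN_TAG_PRESENT.
			 */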
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
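			/*
			 * skb->pkt_type is a narrow bitfield with no simple
			 * offsetof(): PKT_TYPE_OFFSET() and PKT_TYPE_MAX from
			 * bpf_jit.h locate the byte holding it, and the
			 * mask-and-shift below extracts the field down to
			 * bit 0.
			 */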
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
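			/*
			 * The sk_load_* helpers (bpf_jit_asm.S) use a private
			 * calling convention: offset in r_addr, packet start
			 * in r_D, result in r3, cr0.LT set on failure -- so
			 * only the LR needs saving around the call.
			 */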
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;

			/*** Jumps and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
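			/*
			 * For example, JGT with jt == 0 and jf == 2 emits a
			 * single branch-if-not-greater to addrs[i + 3] and
			 * falls through on the true path; XOR with
			 * COND_CMP_TRUE turns a condition into its
			 * complement.
			 */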
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

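	/*
	 * flen + 1 slots: the extra entry past the last instruction records
	 * the offset of the epilogue, which bpf_jit_build_body() reads back
	 * as exit_addr for branches out of the filter.
	 */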
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
		/* Function descriptor nastiness: Address + TOC */
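		/*
		 * With the ELFv1 ABI a function pointer points at a
		 * descriptor of {entry address, TOC pointer}, so one is
		 * fabricated at the start of the image and handed out as
		 * bpf_func; the instructions themselves sit at code_base.
		 */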
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
#endif
		fp->bpf_func = (void *)image;
		fp->jited = true;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}
