linux/arch/powerpc/net/bpf_jit_comp.c
/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

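/*
 * Ordering matters here: smp_wmb() publishes the newly written opcodes
 * before the flush, and flush_icache_range() then makes the range coherent
 * between the data and instruction caches, so the CPU fetches the freshly
 * JITed instructions rather than stale cache contents.
 */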
static inline void bpf_flush_icache(void *start, void *end)
{
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

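/*
 * Register conventions (see bpf_jit.h for the exact GPR assignments):
 * r_skb holds the skb argument, r_A and r_X the BPF accumulator and index
 * registers, r_D and r_HL the packet data pointer and linear header length,
 * and r_M..r_M+15 are non-volatile GPRs backing the BPF scratch memory
 * M[0..15].  r_ret carries the filter's return value out through GPR3.
 */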
static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
                                   struct codegen_context *ctx)
{
        int i;
        const struct sock_filter *filter = fp->insns;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                /* Make stackframe */
                if (ctx->seen & SEEN_DATAREF) {
                        /* If we call any helpers (for loads), save LR */
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
                        PPC_STD(0, 1, 16);

                        /* Back up non-volatile regs. */
                        PPC_STD(r_D, 1, -(8*(32-r_D)));
                        PPC_STD(r_HL, 1, -(8*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /*
                         * Conditionally save regs r15-r31 as some will be used
                         * for M[] data.
                         */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_STD(i, 1, -(8*(32-i)));
                        }
                }
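                /*
                 * stdu r1, -BPF_PPC_STACKFRAME(r1): atomically allocate the
                 * stack frame and store the back-chain pointer, as the PPC64
                 * ABI requires.  The instruction is encoded by hand here
                 * rather than through a convenience macro.
                 */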
                EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
                     (-BPF_PPC_STACKFRAME & 0xfffc));
        }

        if (ctx->seen & SEEN_DATAREF) {
                /*
                 * If this filter needs to access skb data,
                 * prepare r_D and r_HL:
                 *  r_HL = skb->len - skb->data_len
                 *  r_D  = skb->data
                 */
                PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                         data_len));
                PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
                PPC_SUB(r_HL, r_HL, r_scratch1);
                PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
        }

        if (ctx->seen & SEEN_XREG) {
                /*
                 * TODO: Could also detect whether first instr. sets X and
                 * avoid this (as below, with A).
                 */
                PPC_LI(r_X, 0);
        }

        switch (filter[0].code) {
        case BPF_S_RET_K:
        case BPF_S_LD_W_LEN:
        case BPF_S_ANC_PROTOCOL:
        case BPF_S_ANC_IFINDEX:
        case BPF_S_ANC_MARK:
        case BPF_S_ANC_RXHASH:
        case BPF_S_ANC_VLAN_TAG:
        case BPF_S_ANC_VLAN_TAG_PRESENT:
        case BPF_S_ANC_CPU:
        case BPF_S_ANC_QUEUE:
        case BPF_S_LD_W_ABS:
        case BPF_S_LD_H_ABS:
        case BPF_S_LD_B_ABS:
                /* first instruction sets A register (or is RET 'constant') */
                break;
        default:
                /* make sure we don't leak kernel information to userspace */
                PPC_LI(r_A, 0);
        }
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
        int i;

        if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
                if (ctx->seen & SEEN_DATAREF) {
                        PPC_LD(0, 1, 16);
                        PPC_MTLR(0);
                        PPC_LD(r_D, 1, -(8*(32-r_D)));
                        PPC_LD(r_HL, 1, -(8*(32-r_HL)));
                }
                if (ctx->seen & SEEN_MEM) {
                        /* Restore any saved non-vol registers */
                        for (i = r_M; i < (r_M+16); i++) {
                                if (ctx->seen & (1 << (i-r_M)))
                                        PPC_LD(i, 1, -(8*(32-i)));
                        }
                }
        }
        /* The RETs have left a return value in R3. */

        PPC_BLR();
}

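/*
 * Pick the skb-load helper variant by the sign of the constant offset K:
 * a non-negative K uses the fast positive-offset helper, K within the
 * SKF_LL_OFF..-1 ancillary range uses the negative-offset helper, and
 * anything more negative falls back to the generic helper, which performs
 * the full range checks at run time.
 */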
#define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                              struct codegen_context *ctx,
                              unsigned int *addrs)
{
        const struct sock_filter *filter = fp->insns;
        int flen = fp->len;
        u8 *func;
        unsigned int true_cond;
        int i;

        /* Start of epilogue code */
        unsigned int exit_addr = addrs[flen];

        for (i = 0; i < flen; i++) {
                unsigned int K = filter[i].k;

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
                 * the start of the body code.
                 */
                addrs[i] = ctx->idx * 4;

                switch (filter[i].code) {
                        /*** ALU ops ***/
                case BPF_S_ALU_ADD_X: /* A += X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_ADD(r_A, r_A, r_X);
                        break;
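                /*
                 * For the immediate forms below: addi sign-extends its
                 * 16-bit operand, so a 32-bit constant is applied as addi
                 * with IMM_L(K) plus addis with IMM_HA(K), where IMM_HA
                 * rounds the high half up by one whenever the low half's
                 * sign bit is set, compensating for the sign extension.
                 */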
                case BPF_S_ALU_ADD_K: /* A += K; */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(K));
                        break;
                case BPF_S_ALU_SUB_X: /* A -= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SUB(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_SUB_K: /* A -= K */
                        if (!K)
                                break;
                        PPC_ADDI(r_A, r_A, IMM_L(-K));
                        if (K >= 32768)
                                PPC_ADDIS(r_A, r_A, IMM_HA(-K));
                        break;
                case BPF_S_ALU_MUL_X: /* A *= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_MUL(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_MUL_K: /* A *= K */
                        if (K < 32768)
                                PPC_MULI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_MUL(r_A, r_A, r_scratch1);
                        }
                        break;
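                /*
                 * No 32-bit modulo instruction is used here: A %= X is
                 * synthesized as A - (A / X) * X.  An X of zero makes the
                 * filter return 0: the short branch over the two-instruction
                 * "li r_ret, 0; b exit" stub targets the current address
                 * plus 12 bytes (the branch itself plus the stub).  The
                 * constant-divisor cases need no such guard, since
                 * sk_chk_filter() rejects division or modulus by a constant
                 * zero before the JIT ever sees the program.
                 */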
                case BPF_S_ALU_MOD_X: /* A %= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        PPC_DIVWU(r_scratch1, r_A, r_X);
                        PPC_MUL(r_scratch1, r_X, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_S_ALU_MOD_K: /* A %= K; */
                        PPC_LI32(r_scratch2, K);
                        PPC_DIVWU(r_scratch1, r_A, r_scratch2);
                        PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
                        PPC_SUB(r_A, r_A, r_scratch1);
                        break;
                case BPF_S_ALU_DIV_X: /* A /= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_CMPWI(r_X, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /*
                                 * Exit, returning 0; first pass hits here
                                 * (longer worst-case code size).
                                 */
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        PPC_DIVWU(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_DIV_K: /* A /= K */
                        if (K == 1)
                                break;
                        PPC_LI32(r_scratch1, K);
                        PPC_DIVWU(r_A, r_A, r_scratch1);
                        break;
                case BPF_S_ALU_AND_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_AND(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_AND_K:
                        if (!IMM_H(K))
                                PPC_ANDI(r_A, r_A, K);
                        else {
                                PPC_LI32(r_scratch1, K);
                                PPC_AND(r_A, r_A, r_scratch1);
                        }
                        break;
                case BPF_S_ALU_OR_X:
                        ctx->seen |= SEEN_XREG;
                        PPC_OR(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_OR_K:
                        if (IMM_L(K))
                                PPC_ORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_ORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_S_ANC_ALU_XOR_X:
                case BPF_S_ALU_XOR_X: /* A ^= X */
                        ctx->seen |= SEEN_XREG;
                        PPC_XOR(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_XOR_K: /* A ^= K */
                        if (IMM_L(K))
                                PPC_XORI(r_A, r_A, IMM_L(K));
                        if (K >= 65536)
                                PPC_XORIS(r_A, r_A, IMM_H(K));
                        break;
                case BPF_S_ALU_LSH_X: /* A <<= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SLW(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_LSH_K:
                        if (K == 0)
                                break;
                        else
                                PPC_SLWI(r_A, r_A, K);
                        break;
                case BPF_S_ALU_RSH_X: /* A >>= X; */
                        ctx->seen |= SEEN_XREG;
                        PPC_SRW(r_A, r_A, r_X);
                        break;
                case BPF_S_ALU_RSH_K: /* A >>= K; */
                        if (K == 0)
                                break;
                        else
                                PPC_SRWI(r_A, r_A, K);
                        break;
                case BPF_S_ALU_NEG:
                        PPC_NEG(r_A, r_A);
                        break;
                case BPF_S_RET_K:
                        PPC_LI32(r_ret, K);
                        if (!K) {
                                if (ctx->pc_ret0 == -1)
                                        ctx->pc_ret0 = i;
                        }
                        /*
                         * If this isn't the very last instruction, branch to
                         * the epilogue if we have stuff to clean up.  Otherwise,
                         * if there's nothing to tidy, just return.  If we /are/
                         * the last instruction, we're about to fall through to
                         * the epilogue to return.
                         */
                        if (i != flen - 1) {
                                /*
                                 * Note: 'seen' is properly valid only on pass
                                 * #2.  Both parts of this conditional are the
                                 * same instruction size though, meaning the
                                 * first pass will still correctly determine the
                                 * code size/addresses.
                                 */
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_S_RET_A:
                        PPC_MR(r_ret, r_A);
                        if (i != flen - 1) {
                                if (ctx->seen)
                                        PPC_JMP(exit_addr);
                                else
                                        PPC_BLR();
                        }
                        break;
                case BPF_S_MISC_TAX: /* X = A */
                        PPC_MR(r_X, r_A);
                        break;
                case BPF_S_MISC_TXA: /* A = X */
                        ctx->seen |= SEEN_XREG;
                        PPC_MR(r_A, r_X);
                        break;

                        /*** Constant loads/M[] access ***/
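                        /*
                         * For M[] accesses, the low bits of ctx->seen record
                         * which of the sixteen scratch slots the program
                         * touches (1 << (K & 0xf)), so the prologue and
                         * epilogue save and restore only the non-volatile
                         * GPRs actually used as M[] storage.
                         */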
                case BPF_S_LD_IMM: /* A = K */
                        PPC_LI32(r_A, K);
                        break;
                case BPF_S_LDX_IMM: /* X = K */
                        PPC_LI32(r_X, K);
                        break;
                case BPF_S_LD_MEM: /* A = mem[K] */
                        PPC_MR(r_A, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_S_LDX_MEM: /* X = mem[K] */
                        PPC_MR(r_X, r_M + (K & 0xf));
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_S_ST: /* mem[K] = A */
                        PPC_MR(r_M + (K & 0xf), r_A);
                        ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_S_STX: /* mem[K] = X */
                        PPC_MR(r_M + (K & 0xf), r_X);
                        ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
                        break;
                case BPF_S_LD_W_LEN: /* A = skb->len; */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
                case BPF_S_LDX_W_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;

                        /*** Ancillary info loads ***/
                case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                            protocol));
                        break;
                case BPF_S_ANC_IFINDEX:
                        PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
                                                                dev));
                        PPC_CMPDI(r_scratch1, 0);
                        if (ctx->pc_ret0 != -1) {
                                PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
                        } else {
                                /* Exit, returning 0; first pass hits here. */
                                PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
                                PPC_LI(r_ret, 0);
                                PPC_JMP(exit_addr);
                        }
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
                        PPC_LWZ_OFFS(r_A, r_scratch1,
                                     offsetof(struct net_device, ifindex));
                        break;
                case BPF_S_ANC_MARK:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          mark));
                        break;
                case BPF_S_ANC_RXHASH:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          rxhash));
                        break;
                case BPF_S_ANC_VLAN_TAG:
                case BPF_S_ANC_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
                        if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
                                /* Match the interpreter: tci sans present bit */
                                PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
                        } else {
                                /* Reduce the present bit to a 0/1 flag */
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
                                PPC_SRWI(r_A, r_A, 12);
                        }
                        break;
                case BPF_S_ANC_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          queue_mapping));
                        break;
                case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
                        /*
                         * PACA ptr is r13:
                         * raw_smp_processor_id() = local_paca->paca_index
                         */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
                                                  paca_index) != 2);
                        PPC_LHZ_OFFS(r_A, 13,
                                     offsetof(struct paca_struct, paca_index));
#else
                        PPC_LI(r_A, 0);
#endif
                        break;

                        /*** Absolute loads from packet header/data ***/
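                /*
                 * The sk_load_* helpers (assembler routines in bpf_jit_64.S)
                 * take the skb in r_skb and the offset in r_addr, return the
                 * loaded value in r_A, and signal failure by setting the LT
                 * bit of cr0 with the filter's return value already in r3,
                 * so the "blt exit" below both catches errors and returns.
                 */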
                case BPF_S_LD_W_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_word);
                        goto common_load;
                case BPF_S_LD_H_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_half);
                        goto common_load;
                case BPF_S_LD_B_ABS:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
                common_load:
                        /* Load from [K]. */
                        ctx->seen |= SEEN_DATAREF;
                        PPC_LI64(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_LI32(r_addr, K);
                        PPC_BLRL();
                        /*
                         * Helper returns 'lt' condition on error, and an
                         * appropriate return value in r3
                         */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                        /*** Indirect loads from packet header/data ***/
                case BPF_S_LD_W_IND:
                        func = sk_load_word;
                        goto common_load_ind;
                case BPF_S_LD_H_IND:
                        func = sk_load_half;
                        goto common_load_ind;
                case BPF_S_LD_B_IND:
                        func = sk_load_byte;
                common_load_ind:
                        /*
                         * Load from [X + K].  Negative offsets are tested for
                         * in the helper functions.
                         */
                        ctx->seen |= SEEN_DATAREF | SEEN_XREG;
                        PPC_LI64(r_scratch1, func);
                        PPC_MTLR(r_scratch1);
                        PPC_ADDI(r_addr, r_X, IMM_L(K));
                        if (K >= 32768)
                                PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
                        PPC_BLRL();
                        /* If error, cr0.LT set */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

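                /*
                 * LDX_B_MSH computes X = (packet[K] & 0xf) << 2, the classic
                 * "load IP header length" idiom; the masking and shifting
                 * happen inside the sk_load_byte_msh helper, which deposits
                 * its result in r_X rather than r_A.
                 */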
                case BPF_S_LDX_B_MSH:
                        func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
                        goto common_load;

                        /*** Jumps and branches ***/
                case BPF_S_JMP_JA:
                        if (K != 0)
                                PPC_JMP(addrs[i + 1 + K]);
                        break;

                case BPF_S_JMP_JGT_K:
                case BPF_S_JMP_JGT_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_S_JMP_JGE_K:
                case BPF_S_JMP_JGE_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_S_JMP_JEQ_K:
                case BPF_S_JMP_JEQ_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_S_JMP_JSET_K:
                case BPF_S_JMP_JSET_X:
                        true_cond = COND_NE;
                        /* Fall through */
                cond_branch:
                        /* same targets, can avoid doing the test :) */
                        if (filter[i].jt == filter[i].jf) {
                                if (filter[i].jt > 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jt]);
                                break;
                        }

                        switch (filter[i].code) {
                        case BPF_S_JMP_JGT_X:
                        case BPF_S_JMP_JGE_X:
                        case BPF_S_JMP_JEQ_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_CMPLW(r_A, r_X);
                                break;
                        case BPF_S_JMP_JSET_X:
                                ctx->seen |= SEEN_XREG;
                                PPC_AND_DOT(r_scratch1, r_A, r_X);
                                break;
                        case BPF_S_JMP_JEQ_K:
                        case BPF_S_JMP_JGT_K:
                        case BPF_S_JMP_JGE_K:
                                if (K < 32768)
                                        PPC_CMPLWI(r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_CMPLW(r_A, r_scratch1);
                                }
                                break;
                        case BPF_S_JMP_JSET_K:
                                if (K < 32768)
                                        /* PPC_ANDI is /only/ dot-form */
                                        PPC_ANDI(r_scratch1, r_A, K);
                                else {
                                        PPC_LI32(r_scratch1, K);
                                        PPC_AND_DOT(r_scratch1, r_A,
                                                    r_scratch1);
                                }
                                break;
                        }
                        /* Sometimes branches are constructed "backward", with
                         * the false path being the branch and true path being
                         * a fallthrough to the next instruction.
                         */
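                        /*
                         * Example: a JGT with jt == 0 makes the true path
                         * the fallthrough, so XORing with COND_CMP_TRUE
                         * flips the branch sense ("branch if >" becomes
                         * "branch if <=") and we branch to the false target
                         * instead of emitting two branches.
                         */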
                        if (filter[i].jt == 0)
                                /* Swap the sense of the branch */
                                PPC_BCC(true_cond ^ COND_CMP_TRUE,
                                        addrs[i + 1 + filter[i].jf]);
                        else {
                                PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
                                if (filter[i].jf != 0)
                                        PPC_JMP(addrs[i + 1 + filter[i].jf]);
                        }
                        break;
                default:
                        /* The filter contains something cruel & unusual.
                         * We don't handle it, but also there shouldn't be
                         * anything missing from our list.
                         */
                        if (printk_ratelimit())
                                pr_err("BPF filter opcode %04x (@%d) unsupported\n",
                                       filter[i].code, i);
                        return -ENOTSUPP;
                }

        }
        /* Set end-of-body-code address for exit. */
        addrs[i] = ctx->idx * 4;

        return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
        unsigned int proglen;
        unsigned int alloclen;
        u32 *image = NULL;
        u32 *code_base;
        unsigned int *addrs;
        struct codegen_context cgctx;
        int pass;
        int flen = fp->len;

        if (!bpf_jit_enable)
                return;

        addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL)
                return;

        /*
         * There are multiple assembly passes as the generated code will change
         * size as it settles down, figuring out the max branch offsets/exit
         * paths required.
         *
         * The range of standard conditional branches is +/- 32Kbytes.  Since
         * BPF_MAXINSNS = 4096, a worst case of 8 bytes of output per BPF
         * instruction already spans that whole range, so a start-to-finish
         * conditional branch cannot be guaranteed to reach.  Long jumps,
         * distinct from short branches, are therefore used as well.
         *
         * Current:
         *
         * For now, both branch types assemble to 2 words (short branches padded
         * with a NOP); this is less efficient, but assembly will always complete
         * after exactly 3 passes:
         *
         * First pass: No code buffer; Program is "faux-generated" -- no code
         * emitted but maximum size of output determined (and addrs[] filled
         * in).  Also, we note whether we use M[], whether we use skb data, etc.
         * All generation choices assumed to be 'worst-case', e.g. branches all
         * far (2 instructions), return path code reduction not available, etc.
         *
         * Second pass: Code buffer allocated with size determined previously.
         * Prologue generated to support features we have seen used.  Exit paths
         * determined and addrs[] is filled in again, as code may be slightly
         * smaller as a result.
         *
         * Third pass: Code generated 'for real', and branch destinations
         * determined from now-accurate addrs[] map.
         *
         * Ideal:
         *
         * If we optimise this, near branches will be shorter.  On the
         * first assembly pass, we should err on the side of caution and
         * generate the biggest code.  On subsequent passes, branches will be
         * generated short or long and code size will reduce.  With smaller
         * code, more branches may fall into the short category, and code will
         * reduce more.
         *
         * Finally, if we see one pass generate code the same size as the
         * previous pass we have converged and should now generate code for
         * real.  Allocating at the end will also save the memory that would
         * otherwise be wasted by the (small) current code shrinkage.
         * Preferably, we should do a small number of passes (e.g. 5) and if we
         * haven't converged by then, get impatient and force code to generate
         * as-is, even if the odd branch would be left long.  The chances of a
         * long jump are tiny with all but the most enormous of BPF filter
         * inputs, so we should usually converge on the third pass.
         */

        cgctx.idx = 0;
        cgctx.seen = 0;
        cgctx.pc_ret0 = -1;
        /* Scouting faux-generate pass 0 */
        if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
                /* We hit something illegal or unsupported. */
                goto out;

        /*
         * Pretend to build prologue, given the features we've seen.  This will
         * update cgctx.idx as it pretends to output instructions, then we can
         * calculate total size from idx.
         */
        bpf_jit_build_prologue(fp, 0, &cgctx);
        bpf_jit_build_epilogue(0, &cgctx);

        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;
        image = module_alloc(alloclen);
        if (!image)
                goto out;

        code_base = image + (FUNCTION_DESCR_SIZE/4);

        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(fp, code_base, &cgctx);
                bpf_jit_build_body(fp, code_base, &cgctx, addrs);
                bpf_jit_build_epilogue(code_base, &cgctx);

                if (bpf_jit_enable > 1)
                        pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }

        if (bpf_jit_enable > 1)
                /* Note that we output the base address of code_base
                 * rather than image, since opcodes are in code_base.
                 */
                bpf_jit_dump(flen, proglen, pass, code_base);

        if (image) {
                bpf_flush_icache(code_base, code_base + (proglen/4));
                /* Function descriptor nastiness: Address + TOC */
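                /*
                 * On PPC64 (ELFv1 ABI) a function pointer refers to a
                 * descriptor rather than to the code itself: its first
                 * doubleword is the entry address and the second the TOC
                 * pointer, so both are written ahead of the opcodes at
                 * code_base before bpf_func is published.
                 */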
                ((u64 *)image)[0] = (u64)code_base;
                ((u64 *)image)[1] = local_paca->kernel_toc;
                fp->bpf_func = (void *)image;
        }
out:
        kfree(addrs);
        return;
}

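/*
 * If the filter was JITed, bpf_func points into module space and must be
 * freed; for a non-JITed filter it still points at sk_run_filter (the
 * interpreter), which must not be freed.
 */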
void bpf_jit_free(struct sk_filter *fp)
{
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
}