linux/arch/powerpc/net/bpf_jit_asm.S
/* bpf_jit_asm.S: Packet/header access helper functions
 * for the PPC BPF compiler.
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <asm/ppc_asm.h>
#include "bpf_jit32.h"

/*
 * All of these routines are called directly from generated code,
 * whose register usage is:
 *
 * r3           skb
 * r4,r5        A,X
 * r6           *** address parameter to helper ***
 * r7-r10       scratch
 * r14          skb->data
 * r15          skb headlen
 * r16-r31      M[]
 */
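
/*
 * Note that r3-r10 are volatile across the C helper calls made by the
 * slow paths below; that is why A, X and the skb pointer are saved and
 * restored around those calls.
 */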

/*
 * To consider: These helpers are so small it could be better to just
 * generate them inline.  Inline code can do the simple headlen check
 * then branch directly to slow_path_XXX if required.  (In fact, could
 * load a spare GPR with the address of slow_path_generic and pass size
 * as an argument, making the call site a mtlr, li and bllr.)
 */
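
/*
 * A minimal sketch of that hypothetical inline call site (illustration
 * only, not assembled here; "r_scratch2" and "slow_path_generic" are
 * assumed names):
 *
 *      mtlr    r_scratch2      # LR = address of slow_path_generic
 *      li      r7, 4           # pass the access size (register choice
 *                              # here is illustrative)
 *      bllr                    # call through LR
 */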
        .globl  sk_load_word
sk_load_word:
        PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_word_neg
        .globl  sk_load_word_positive_offset
sk_load_word_positive_offset:
        /* Are we accessing past headlen? */
        subi    r_scratch1, r_HL, 4
        PPC_LCMP        r_scratch1, r_addr
        blt     bpf_slow_path_word
        /* Nope, just hitting the header.  cr0 here is eq or gt! */
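        /*
         * Classic BPF loads are big-endian (network byte order), so
         * byte-reverse the value on little-endian hosts.
         */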
#ifdef __LITTLE_ENDIAN__
        lwbrx   r_A, r_D, r_addr
#else
        lwzx    r_A, r_D, r_addr
#endif
        blr     /* Return success, cr0 != LT */

        .globl  sk_load_half
sk_load_half:
        PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_half_neg
        .globl  sk_load_half_positive_offset
sk_load_half_positive_offset:
        subi    r_scratch1, r_HL, 2
        PPC_LCMP        r_scratch1, r_addr
        blt     bpf_slow_path_half
#ifdef __LITTLE_ENDIAN__
        lhbrx   r_A, r_D, r_addr
#else
        lhzx    r_A, r_D, r_addr
#endif
        blr

        .globl  sk_load_byte
sk_load_byte:
        PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_byte_neg
        .globl  sk_load_byte_positive_offset
sk_load_byte_positive_offset:
        PPC_LCMP        r_HL, r_addr
        ble     bpf_slow_path_byte
        lbzx    r_A, r_D, r_addr
        blr

/*
 * BPF_LDX | BPF_B | BPF_MSH: ldxb  4*([offset]&0xf)
 * r_addr is the offset value
 */
        .globl sk_load_byte_msh
sk_load_byte_msh:
        PPC_LCMPI       r_addr, 0
        blt     bpf_slow_path_byte_msh_neg
        .globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
        PPC_LCMP        r_HL, r_addr
        ble     bpf_slow_path_byte_msh
        lbzx    r_X, r_D, r_addr
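        /* X = 4 * (loaded byte & 0xf), i.e. the IP header length in bytes */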
        rlwinm  r_X, r_X, 2, 32-4-2, 31-2
        blr

/* Call out to skb_copy_bits:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
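
/*
 * For reference, the C routine being called has this prototype:
 *
 *      int skb_copy_bits(const struct sk_buff *skb, int offset,
 *                        void *to, int len);
 *
 * so r3 = skb (already in place), r4 = offset, r5 = destination buffer
 * on our stack, r6 = length; it returns 0 on success and a negative
 * value on failure.
 */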
#define bpf_slow_path_common(SIZE)                              \
        mflr    r0;                                             \
        PPC_STL r0, PPC_LR_STKOFF(r1);                          \
        /* R3 goes in parameter space of caller's frame */      \
        PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
        PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);      \
        PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);      \
        addi    r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ);         \
        PPC_STLU        r1, -BPF_PPC_SLOWPATH_FRAME(r1);        \
        /* R3 = r_skb, as passed */                             \
        mr      r4, r_addr;                                     \
        li      r6, SIZE;                                       \
        bl      skb_copy_bits;                                  \
        nop;                                                    \
        /* R3 = 0 on success */                                 \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        PPC_LL  r0, PPC_LR_STKOFF(r1);                          \
        PPC_LL  r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);      \
        PPC_LL  r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);      \
        mtlr    r0;                                             \
        PPC_LCMPI       r3, 0;                                  \
        blt     bpf_error;      /* cr0 = LT */                  \
        PPC_LL  r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
        /* Great success! */

bpf_slow_path_word:
        bpf_slow_path_common(4)
        /* Data value is on stack, and cr0 != LT */
        lwz     r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
        blr

bpf_slow_path_half:
        bpf_slow_path_common(2)
        lhz     r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
        blr

bpf_slow_path_byte:
        bpf_slow_path_common(1)
        lbz     r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
        blr

bpf_slow_path_byte_msh:
        bpf_slow_path_common(1)
        lbz     r_X, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
        rlwinm  r_X, r_X, 2, 32-4-2, 31-2
        blr

/* Call out to bpf_internal_load_pointer_neg_helper:
 * We'll need to back up our volatile regs first; we have
 * local variable space at r1+(BPF_PPC_STACK_BASIC).
 * Allocate a new stack frame here to remain ABI-compliant in
 * stashing LR.
 */
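
/*
 * For reference, the C routine being called has this prototype:
 *
 *      void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *                                                 int k, unsigned int size);
 *
 * so r3 = skb (already in place), r4 = k (the negative offset) and
 * r5 = size; it returns a pointer to the data on success, or NULL on
 * failure.
 */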
#define sk_negative_common(SIZE)                                \
        mflr    r0;                                             \
        PPC_STL r0, PPC_LR_STKOFF(r1);                          \
        /* R3 goes in parameter space of caller's frame */      \
        PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
        PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);      \
        PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);      \
        PPC_STLU        r1, -BPF_PPC_SLOWPATH_FRAME(r1);        \
        /* R3 = r_skb, as passed */                             \
        mr      r4, r_addr;                                     \
        li      r5, SIZE;                                       \
        bl      bpf_internal_load_pointer_neg_helper;           \
        nop;                                                    \
        /* R3 != 0 on success */                                \
        addi    r1, r1, BPF_PPC_SLOWPATH_FRAME;                 \
        PPC_LL  r0, PPC_LR_STKOFF(r1);                          \
        PPC_LL  r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1);      \
        PPC_LL  r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1);      \
        mtlr    r0;                                             \
        PPC_LCMPLI      r3, 0;                                  \
        beq     bpf_error_slow; /* cr0 = EQ */                  \
        mr      r_addr, r3;                                     \
        PPC_LL  r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
        /* Great success! */

bpf_slow_path_word_neg:
        lis     r_scratch1, -32         /* r_scratch1 = -0x200000 = SKF_LL_OFF */
        PPC_LCMP        r_addr, r_scratch1      /* error if addr < SKF_LL_OFF */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_word_negative_offset
sk_load_word_negative_offset:
        sk_negative_common(4)
        lwz     r_A, 0(r_addr)
        blr

bpf_slow_path_half_neg:
        lis     r_scratch1, -32         /* SKF_LL_OFF */
        PPC_LCMP        r_addr, r_scratch1      /* error if addr < SKF_LL_OFF */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_half_negative_offset
sk_load_half_negative_offset:
        sk_negative_common(2)
        lhz     r_A, 0(r_addr)
        blr

bpf_slow_path_byte_neg:
        lis     r_scratch1, -32         /* SKF_LL_OFF */
        PPC_LCMP        r_addr, r_scratch1      /* error if addr < SKF_LL_OFF */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_byte_negative_offset
sk_load_byte_negative_offset:
        sk_negative_common(1)
        lbz     r_A, 0(r_addr)
        blr

bpf_slow_path_byte_msh_neg:
        lis     r_scratch1, -32         /* SKF_LL_OFF */
        PPC_LCMP        r_addr, r_scratch1      /* error if addr < SKF_LL_OFF */
        blt     bpf_error       /* cr0 = LT */
        .globl  sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
        sk_negative_common(1)
        lbz     r_X, 0(r_addr)
        rlwinm  r_X, r_X, 2, 32-4-2, 31-2
        blr

bpf_error_slow:
        /* fabricate a cr0 = lt */
        li      r_scratch1, -1
        PPC_LCMPI       r_scratch1, 0
bpf_error:
        /* Entered with cr0 = lt */
        li      r3, 0
        /* Generated code will 'blt epilogue', returning 0. */
        blr