/* linux/arch/mips/net/bpf_jit_asm.S */
/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
  12
#include <asm/asm.h>
#include <asm/isa-rev.h>
#include <asm/regdef.h>
#include "bpf_jit.h"
  17
/* ABI
 *
 * r_skb_hl     skb header length
 * r_skb_data   skb data
 * r_off(a1)    offset register
 * r_A          BPF register A
 * r_X          BPF register X
 * r_skb(a0)    *skb
 * r_M          *scratch memory
 * r_skb_len    skb length
 * r_s0         Scratch register 0
 * r_s1         Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */
  39
  40#define skb     a0
  41#define offset  a1
  42#define SKF_LL_OFF  (-0x200000) /* Can't include linux/filter.h in assembly */
  43
  44        /* We know better :) so prevent assembler reordering etc */
  45        .set    noreorder
  46
  47#define is_offset_negative(TYPE)                                \
  48        /* If offset is negative we have more work to do */     \
  49        slti    t0, offset, 0;                                  \
  50        bgtz    t0, bpf_slow_path_##TYPE##_neg;                 \
  51        /* Be careful what follows in DS. */
  52
  53#define is_offset_in_header(SIZE, TYPE)                         \
  54        /* Reading from header? */                              \
  55        addiu   $r_s0, $r_skb_hl, -SIZE;                        \
  56        slt     t0, $r_s0, offset;                              \
  57        bgtz    t0, bpf_slow_path_##TYPE;                       \
  58
  59LEAF(sk_load_word)
  60        is_offset_negative(word)
  61FEXPORT(sk_load_word_positive)
  62        is_offset_in_header(4, word)
  63        /* Offset within header boundaries */
  64        PTR_ADDU t1, $r_skb_data, offset
  65        .set    reorder
  66        lw      $r_A, 0(t1)
  67        .set    noreorder
  68#ifdef CONFIG_CPU_LITTLE_ENDIAN
  69# if MIPS_ISA_REV >= 2
  70        wsbh    t0, $r_A
  71        rotr    $r_A, t0, 16
  72# else
  73        sll     t0, $r_A, 24
  74        srl     t1, $r_A, 24
  75        srl     t2, $r_A, 8
  76        or      t0, t0, t1
  77        andi    t2, t2, 0xff00
  78        andi    t1, $r_A, 0xff00
  79        or      t0, t0, t2
  80        sll     t1, t1, 8
  81        or      $r_A, t0, t1
  82# endif
  83#endif
  84        jr      $r_ra
  85         move   $r_ret, zero
  86        END(sk_load_word)
  87
  88LEAF(sk_load_half)
  89        is_offset_negative(half)
  90FEXPORT(sk_load_half_positive)
  91        is_offset_in_header(2, half)
  92        /* Offset within header boundaries */
  93        PTR_ADDU t1, $r_skb_data, offset
  94        lhu     $r_A, 0(t1)
  95#ifdef CONFIG_CPU_LITTLE_ENDIAN
  96# if MIPS_ISA_REV >= 2
  97        wsbh    $r_A, $r_A
  98# else
  99        sll     t0, $r_A, 8
 100        srl     t1, $r_A, 8
 101        andi    t0, t0, 0xff00
 102        or      $r_A, t0, t1
 103# endif
 104#endif
 105        jr      $r_ra
 106         move   $r_ret, zero
 107        END(sk_load_half)
 108
 109LEAF(sk_load_byte)
 110        is_offset_negative(byte)
 111FEXPORT(sk_load_byte_positive)
 112        is_offset_in_header(1, byte)
 113        /* Offset within header boundaries */
 114        PTR_ADDU t1, $r_skb_data, offset
 115        lbu     $r_A, 0(t1)
 116        jr      $r_ra
 117         move   $r_ret, zero
 118        END(sk_load_byte)
 119
/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates we leave 4 spaces for argument registers in case
 * the callee needs to use them. Even though we don't care about
 * the argument registers ourselves, we need to allocate that space
 * to remain ABI compliant since the callee may want to use that space.
 * We also allocate 2 more spaces for $r_ra and our return register (*to).
 *
 * n64 is a bit different. The *caller* will allocate the space to preserve
 * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
 * good reason but it does not matter that much really.
 *
 * (void *to) is returned in r_s0
 *
 */
 139#ifdef CONFIG_CPU_LITTLE_ENDIAN
 140#define DS_OFFSET(SIZE) (4 * SZREG)
 141#else
 142#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
 143#endif
 144#define bpf_slow_path_common(SIZE)                              \
 145        /* Quick check. Are we within reasonable boundaries? */ \
 146        LONG_ADDIU      $r_s1, $r_skb_len, -SIZE;               \
 147        sltu            $r_s0, offset, $r_s1;                   \
 148        beqz            $r_s0, fault;                           \
 149        /* Load 4th argument in DS */                           \
 150         LONG_ADDIU     a3, zero, SIZE;                         \
 151        PTR_ADDIU       $r_sp, $r_sp, -(6 * SZREG);             \
 152        PTR_LA          t0, skb_copy_bits;                      \
 153        PTR_S           $r_ra, (5 * SZREG)($r_sp);              \
 154        /* Assign low slot to a2 */                             \
 155        PTR_ADDIU       a2, $r_sp, DS_OFFSET(SIZE);             \
 156        jalr            t0;                                     \
 157        /* Reset our destination slot (DS but it's ok) */       \
 158         INT_S          zero, (4 * SZREG)($r_sp);               \
 159        /*                                                      \
 160         * skb_copy_bits returns 0 on success and -EFAULT       \
 161         * on error. Our data live in a2. Do not bother with    \
 162         * our data if an error has been returned.              \
 163         */                                                     \
 164        /* Restore our frame */                                 \
 165        PTR_L           $r_ra, (5 * SZREG)($r_sp);              \
 166        INT_L           $r_s0, (4 * SZREG)($r_sp);              \
 167        bltz            v0, fault;                              \
 168         PTR_ADDIU      $r_sp, $r_sp, 6 * SZREG;                \
 169        move            $r_ret, zero;                           \
 170
 171NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
 172        bpf_slow_path_common(4)
 173#ifdef CONFIG_CPU_LITTLE_ENDIAN
 174# if MIPS_ISA_REV >= 2
 175        wsbh    t0, $r_s0
 176        jr      $r_ra
 177         rotr   $r_A, t0, 16
 178# else
 179        sll     t0, $r_s0, 24
 180        srl     t1, $r_s0, 24
 181        srl     t2, $r_s0, 8
 182        or      t0, t0, t1
 183        andi    t2, t2, 0xff00
 184        andi    t1, $r_s0, 0xff00
 185        or      t0, t0, t2
 186        sll     t1, t1, 8
 187        jr      $r_ra
 188         or     $r_A, t0, t1
 189# endif
 190#else
 191        jr      $r_ra
 192         move   $r_A, $r_s0
 193#endif
 194
 195        END(bpf_slow_path_word)
 196
 197NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
 198        bpf_slow_path_common(2)
 199#ifdef CONFIG_CPU_LITTLE_ENDIAN
 200# if MIPS_ISA_REV >= 2
 201        jr      $r_ra
 202         wsbh   $r_A, $r_s0
 203# else
 204        sll     t0, $r_s0, 8
 205        andi    t1, $r_s0, 0xff00
 206        andi    t0, t0, 0xff00
 207        srl     t1, t1, 8
 208        jr      $r_ra
 209         or     $r_A, t0, t1
 210# endif
 211#else
 212        jr      $r_ra
 213         move   $r_A, $r_s0
 214#endif
 215
 216        END(bpf_slow_path_half)
 217
 218NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
 219        bpf_slow_path_common(1)
 220        jr      $r_ra
 221         move   $r_A, $r_s0
 222
 223        END(bpf_slow_path_byte)
 224
 225/*
 226 * Negative entry points
 227 */
 228        .macro bpf_is_end_of_data
 229        li      t0, SKF_LL_OFF
 230        /* Reading link layer data? */
 231        slt     t1, offset, t0
 232        bgtz    t1, fault
 233        /* Be careful what follows in DS. */
 234        .endm
/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *                                            int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
 244#define bpf_negative_common(SIZE)                                       \
 245        PTR_ADDIU       $r_sp, $r_sp, -(6 * SZREG);                     \
 246        PTR_LA          t0, bpf_internal_load_pointer_neg_helper;       \
 247        PTR_S           $r_ra, (5 * SZREG)($r_sp);                      \
 248        jalr            t0;                                             \
 249         li             a2, SIZE;                                       \
 250        PTR_L           $r_ra, (5 * SZREG)($r_sp);                      \
 251        /* Check return pointer */                                      \
 252        beqz            v0, fault;                                      \
 253         PTR_ADDIU      $r_sp, $r_sp, 6 * SZREG;                        \
 254        /* Preserve our pointer */                                      \
 255        move            $r_s0, v0;                                      \
 256        /* Set return value */                                          \
 257        move            $r_ret, zero;                                   \
 258
 259bpf_slow_path_word_neg:
 260        bpf_is_end_of_data
 261NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
 262        bpf_negative_common(4)
 263        jr      $r_ra
 264         lw     $r_A, 0($r_s0)
 265        END(sk_load_word_negative)
 266
 267bpf_slow_path_half_neg:
 268        bpf_is_end_of_data
 269NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
 270        bpf_negative_common(2)
 271        jr      $r_ra
 272         lhu    $r_A, 0($r_s0)
 273        END(sk_load_half_negative)
 274
 275bpf_slow_path_byte_neg:
 276        bpf_is_end_of_data
 277NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
 278        bpf_negative_common(1)
 279        jr      $r_ra
 280         lbu    $r_A, 0($r_s0)
 281        END(sk_load_byte_negative)
 282
 283fault:
 284        jr      $r_ra
 285         addiu $r_ret, zero, 1
 286