linux/net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *     Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *     - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>

/*
 * No hurry in this branch: it only handles the negative offsets
 * (SKF_NET_OFF, SKF_LL_OFF) that read relative to the network or
 * link-layer header instead of skb->data.
 */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
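
/*
 * Illustrative sketch (not part of this file): a classic BPF program can
 * use the special negative offsets to read relative to a header rather
 * than skb->data.  For example, the instruction
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 0)
 *
 * arrives here with a negative k, so load_pointer() falls through to
 * __load_pointer(), which resolves the offset against
 * skb_network_header() and bounds-checks the result.
 */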

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then trim skb->data to the length returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper around sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
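
/*
 * Typical use, as in sock_queue_rcv_skb() (an illustrative sketch,
 * error handling simplified): run the attached filter and bail out
 * when the packet is rejected, leaving the caller to free the skb.
 *
 *	err = sk_filter(sk, skb);
 *	if (err)
 *		return err;
 */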

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return the length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to land before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
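		/*
		 * On register-starved x86-32, reading fentry->k through a
		 * macro rather than caching it in a local seems to help
		 * gcc's register allocation across this large switch.
		 */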
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
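			/* sk_chk_filter() has replaced K by reciprocal_value(K) */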
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtraction below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			/* guard the unsigned subtractions below */
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit in the remaining
			 * skb->len - A bytes
			 */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
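
/*
 * Example (illustrative, not part of this file): a minimal classic BPF
 * program as executed by the interpreter above.  On an AF_PACKET socket
 * it keeps the first 64 bytes of IPv4 frames and drops everything else:
 *
 *	struct sock_filter prog[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 64),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 * Offset 12 is the EtherType; get_unaligned_be16() above converts it to
 * host order before the compare.  sk_chk_filter() must first translate
 * the opcodes into their BPF_S_* equivalents.
 */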

/*
 * Security:
 *
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 *
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
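
/*
 * Example of a filter this check rejects (illustrative): the load reads
 * scratch cell M[3] before any store to it, so sk_chk_filter() fails
 * with -EINVAL:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * Inserting BPF_STMT(BPF_ST, 3) before the load marks the cell valid on
 * every path reaching it, and the program passes.
 */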

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and it must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, a large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen-pc-1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
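
/*
 * Two of the rewrites above, spelled out (illustrative): an ancillary
 * load is an ordinary absolute load with a magic offset, e.g.
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * becomes BPF_S_ANC_IFINDEX, so sk_run_filter() fetches
 * skb->dev->ifindex instead of packet bytes.  And BPF_ALU|BPF_DIV|BPF_K
 * has its constant replaced by reciprocal_value(k), letting the
 * interpreter divide with a multiply and a shift.
 */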

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return value is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure the new filter is actually there; its instructions
	 * are validated by sk_chk_filter() below.
	 */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	bpf_jit_compile(fp);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
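
/*
 * Userspace reaches this function through setsockopt().  A minimal
 * sketch (error handling omitted), reusing the four-instruction IPv4
 * program shown after sk_run_filter() above:
 *
 *	struct sock_fprog fprog = {
 *		.len	= 4,
 *		.filter	= prog,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */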

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
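
/*
 * The userspace counterpart is SO_DETACH_FILTER; the option value is
 * unused but must still be a readable int, e.g.:
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 *
 * This returns -ENOENT when no filter is attached.
 */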