linux/kernel/bpf/bpf_iter.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2020 Facebook */
   3
   4#include <linux/fs.h>
   5#include <linux/anon_inodes.h>
   6#include <linux/filter.h>
   7#include <linux/bpf.h>
   8
/* One registered iterator target; lives on the file-scope "targets" list. */
struct bpf_iter_target_info {
	struct list_head list;
	const struct bpf_iter_reg *reg_info;
	u32 btf_id;	/* cached value */
};
  14
/* Links an attached bpf prog to a registered iterator target. */
struct bpf_iter_link {
	struct bpf_link link;
	struct bpf_iter_aux_info aux;	/* target-specific attach state */
	struct bpf_iter_target_info *tinfo;
};
  20
/* Per-open-file iterator state, placed at the head of the seq_file private
 * allocation; target-specific data follows in target_private[].
 */
struct bpf_iter_priv_data {
	struct bpf_iter_target_info *tinfo;
	const struct bpf_iter_seq_info *seq_info;
	struct bpf_prog *prog;			/* prog run per visited object */
	u64 session_id;				/* unique per opened seq_file */
	u64 seq_num;				/* object number in this session */
	bool done_stop;				/* prog already ran for stop() */
	u8 target_private[] __aligned(8);
};
  30
/* all registered iterator targets; protected by targets_mutex */
static struct list_head targets = LIST_HEAD_INIT(targets);
static DEFINE_MUTEX(targets_mutex);

/* protect bpf_iter_link changes */
static DEFINE_MUTEX(link_mutex);

/* incremented on every opened seq_file */
static atomic64_t session_id;

static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
			    const struct bpf_iter_seq_info *seq_info);
  42
  43static void bpf_iter_inc_seq_num(struct seq_file *seq)
  44{
  45        struct bpf_iter_priv_data *iter_priv;
  46
  47        iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
  48                                 target_private);
  49        iter_priv->seq_num++;
  50}
  51
  52static void bpf_iter_dec_seq_num(struct seq_file *seq)
  53{
  54        struct bpf_iter_priv_data *iter_priv;
  55
  56        iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
  57                                 target_private);
  58        iter_priv->seq_num--;
  59}
  60
  61static void bpf_iter_done_stop(struct seq_file *seq)
  62{
  63        struct bpf_iter_priv_data *iter_priv;
  64
  65        iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
  66                                 target_private);
  67        iter_priv->done_stop = true;
  68}
  69
  70static bool bpf_iter_support_resched(struct seq_file *seq)
  71{
  72        struct bpf_iter_priv_data *iter_priv;
  73
  74        iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
  75                                 target_private);
  76        return iter_priv->tinfo->reg_info->feature & BPF_ITER_RESCHED;
  77}
  78
/* maximum visited objects in a single bpf_seq_read() before bailing out */
#define MAX_ITER_OBJECTS	1000000
  81
  82/* bpf_seq_read, a customized and simpler version for bpf iterator.
  83 * no_llseek is assumed for this file.
  84 * The following are differences from seq_read():
 *  . fixed buffer size (PAGE_SIZE << 3)
  86 *  . assuming no_llseek
  87 *  . stop() may call bpf program, handling potential overflow there
  88 */
static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
			    loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	size_t n, offs, copied = 0;
	int err = 0, num_objs = 0;
	bool can_resched;
	void *p;

	mutex_lock(&seq->lock);

	/* allocate the output buffer lazily on first read */
	if (!seq->buf) {
		seq->size = PAGE_SIZE << 3;
		seq->buf = kvmalloc(seq->size, GFP_KERNEL);
		if (!seq->buf) {
			err = -ENOMEM;
			goto done;
		}
	}

	/* drain data left over from a previous read before producing more */
	if (seq->count) {
		n = min(seq->count, size);
		err = copy_to_user(buf, seq->buf + seq->from, n);
		if (err) {
			err = -EFAULT;
			goto done;
		}
		seq->count -= n;
		seq->from += n;
		copied = n;
		goto done;
	}

	seq->from = 0;
	p = seq->op->start(seq, &seq->index);
	if (!p)
		goto stop;	/* end of objects; stop() may still run the prog */
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		seq->op->stop(seq, p);
		seq->count = 0;
		goto done;
	}

	err = seq->op->show(seq, p);
	if (err > 0) {
		/* object is skipped, decrease seq_num, so next
		 * valid object can reuse the same seq_num.
		 */
		bpf_iter_dec_seq_num(seq);
		seq->count = 0;
	} else if (err < 0 || seq_has_overflowed(seq)) {
		/* the very first object cannot even fit the buffer */
		if (!err)
			err = -E2BIG;
		seq->op->stop(seq, p);
		seq->count = 0;
		goto done;
	}

	can_resched = bpf_iter_support_resched(seq);
	while (1) {
		loff_t pos = seq->index;

		num_objs++;
		offs = seq->count;	/* rollback point if show() fails */
		p = seq->op->next(seq, p, &seq->index);
		if (pos == seq->index) {
			pr_info_ratelimited("buggy seq_file .next function %ps "
				"did not updated position index\n",
				seq->op->next);
			seq->index++;
		}

		if (IS_ERR_OR_NULL(p))
			break;

		/* got a valid next object, increase seq_num */
		bpf_iter_inc_seq_num(seq);

		if (seq->count >= size)
			break;

		/* cap work per read(); -EAGAIN only if nothing was emitted */
		if (num_objs >= MAX_ITER_OBJECTS) {
			if (offs == 0) {
				err = -EAGAIN;
				seq->op->stop(seq, p);
				goto done;
			}
			break;
		}

		err = seq->op->show(seq, p);
		if (err > 0) {
			/* skipped object: roll back seq_num and output */
			bpf_iter_dec_seq_num(seq);
			seq->count = offs;
		} else if (err < 0 || seq_has_overflowed(seq)) {
			seq->count = offs;
			if (offs == 0) {
				if (!err)
					err = -E2BIG;
				seq->op->stop(seq, p);
				goto done;
			}
			/* partial output is fine; deliver what we have */
			break;
		}

		if (can_resched)
			cond_resched();
	}
stop:
	offs = seq->count;
	/* bpf program called if !p */
	seq->op->stop(seq, p);
	if (!p) {
		if (!seq_has_overflowed(seq)) {
			/* prog ran for stop(); don't run it again on release */
			bpf_iter_done_stop(seq);
		} else {
			/* stop() output overflowed; retry it on the next read */
			seq->count = offs;
			if (offs == 0) {
				err = -E2BIG;
				goto done;
			}
		}
	}

	n = min(seq->count, size);
	err = copy_to_user(buf, seq->buf, n);
	if (err) {
		err = -EFAULT;
		goto done;
	}
	copied = n;
	seq->count -= n;
	seq->from = n;
done:
	if (!copied)
		copied = err;
	else
		*ppos += copied;
	mutex_unlock(&seq->lock);
	return copied;
}
 231
 232static const struct bpf_iter_seq_info *
 233__get_seq_info(struct bpf_iter_link *link)
 234{
 235        const struct bpf_iter_seq_info *seq_info;
 236
 237        if (link->aux.map) {
 238                seq_info = link->aux.map->ops->iter_seq_info;
 239                if (seq_info)
 240                        return seq_info;
 241        }
 242
 243        return link->tinfo->reg_info->seq_info;
 244}
 245
 246static int iter_open(struct inode *inode, struct file *file)
 247{
 248        struct bpf_iter_link *link = inode->i_private;
 249
 250        return prepare_seq_file(file, link, __get_seq_info(link));
 251}
 252
/* release() for the iterator file: tear down target state, drop the prog
 * reference and free the seq_file private allocation.
 */
static int iter_release(struct inode *inode, struct file *file)
{
	struct bpf_iter_priv_data *iter_priv;
	struct seq_file *seq;

	seq = file->private_data;
	if (!seq)
		return 0;	/* open failed before the seq_file existed */

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);

	if (iter_priv->seq_info->fini_seq_private)
		iter_priv->seq_info->fini_seq_private(seq->private);

	bpf_prog_put(iter_priv->prog);
	/* seq_release_private() frees seq->private; point it at the start of
	 * the real allocation (iter_priv) so the whole buffer is freed
	 */
	seq->private = iter_priv;

	return seq_release_private(inode, file);
}
 273
/* file_operations for iterator files; reads go through the customized
 * bpf_seq_read() above rather than the generic seq_read().
 */
const struct file_operations bpf_iter_fops = {
	.open		= iter_open,
	.llseek		= no_llseek,
	.read		= bpf_seq_read,
	.release	= iter_release,
};
 280
 281/* The argument reg_info will be cached in bpf_iter_target_info.
 282 * The common practice is to declare target reg_info as
 283 * a const static variable and passed as an argument to
 284 * bpf_iter_reg_target().
 285 */
 286int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
 287{
 288        struct bpf_iter_target_info *tinfo;
 289
 290        tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
 291        if (!tinfo)
 292                return -ENOMEM;
 293
 294        tinfo->reg_info = reg_info;
 295        INIT_LIST_HEAD(&tinfo->list);
 296
 297        mutex_lock(&targets_mutex);
 298        list_add(&tinfo->list, &targets);
 299        mutex_unlock(&targets_mutex);
 300
 301        return 0;
 302}
 303
 304void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info)
 305{
 306        struct bpf_iter_target_info *tinfo;
 307        bool found = false;
 308
 309        mutex_lock(&targets_mutex);
 310        list_for_each_entry(tinfo, &targets, list) {
 311                if (reg_info == tinfo->reg_info) {
 312                        list_del(&tinfo->list);
 313                        kfree(tinfo);
 314                        found = true;
 315                        break;
 316                }
 317        }
 318        mutex_unlock(&targets_mutex);
 319
 320        WARN_ON(found == false);
 321}
 322
/* Remember the prog's attach btf_id on the target so later lookups can match
 * by id instead of by target-name string comparison.
 */
static void cache_btf_id(struct bpf_iter_target_info *tinfo,
			 struct bpf_prog *prog)
{
	tinfo->btf_id = prog->aux->attach_btf_id;
}
 328
 329bool bpf_iter_prog_supported(struct bpf_prog *prog)
 330{
 331        const char *attach_fname = prog->aux->attach_func_name;
 332        u32 prog_btf_id = prog->aux->attach_btf_id;
 333        const char *prefix = BPF_ITER_FUNC_PREFIX;
 334        struct bpf_iter_target_info *tinfo;
 335        int prefix_len = strlen(prefix);
 336        bool supported = false;
 337
 338        if (strncmp(attach_fname, prefix, prefix_len))
 339                return false;
 340
 341        mutex_lock(&targets_mutex);
 342        list_for_each_entry(tinfo, &targets, list) {
 343                if (tinfo->btf_id && tinfo->btf_id == prog_btf_id) {
 344                        supported = true;
 345                        break;
 346                }
 347                if (!strcmp(attach_fname + prefix_len, tinfo->reg_info->target)) {
 348                        cache_btf_id(tinfo, prog);
 349                        supported = true;
 350                        break;
 351                }
 352        }
 353        mutex_unlock(&targets_mutex);
 354
 355        if (supported) {
 356                prog->aux->ctx_arg_info_size = tinfo->reg_info->ctx_arg_info_size;
 357                prog->aux->ctx_arg_info = tinfo->reg_info->ctx_arg_info;
 358        }
 359
 360        return supported;
 361}
 362
 363static void bpf_iter_link_release(struct bpf_link *link)
 364{
 365        struct bpf_iter_link *iter_link =
 366                container_of(link, struct bpf_iter_link, link);
 367
 368        if (iter_link->tinfo->reg_info->detach_target)
 369                iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
 370}
 371
 372static void bpf_iter_link_dealloc(struct bpf_link *link)
 373{
 374        struct bpf_iter_link *iter_link =
 375                container_of(link, struct bpf_iter_link, link);
 376
 377        kfree(iter_link);
 378}
 379
/* bpf_link update_prog hook: atomically swap the attached prog.
 * Returns -EPERM if an expected old_prog no longer matches, -EINVAL if
 * new_prog is not attach-compatible with the current one.
 */
static int bpf_iter_link_replace(struct bpf_link *link,
				 struct bpf_prog *new_prog,
				 struct bpf_prog *old_prog)
{
	int ret = 0;

	mutex_lock(&link_mutex);
	/* caller may pin the replacement to a specific current prog */
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* replacement must target the same attach point */
	if (link->prog->type != new_prog->type ||
	    link->prog->expected_attach_type != new_prog->expected_attach_type ||
	    link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* publish new_prog, then drop the reference held on the old prog */
	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	mutex_unlock(&link_mutex);
	return ret;
}
 406
 407static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
 408                                      struct seq_file *seq)
 409{
 410        struct bpf_iter_link *iter_link =
 411                container_of(link, struct bpf_iter_link, link);
 412        bpf_iter_show_fdinfo_t show_fdinfo;
 413
 414        seq_printf(seq,
 415                   "target_name:\t%s\n",
 416                   iter_link->tinfo->reg_info->target);
 417
 418        show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
 419        if (show_fdinfo)
 420                show_fdinfo(&iter_link->aux, seq);
 421}
 422
/* fill_link_info hook: copy the target name to user space (reporting the
 * needed length, truncating with -ENOSPC on a short buffer), then let the
 * target fill its own fields.
 */
static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
					struct bpf_link_info *info)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);
	char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
	bpf_iter_fill_link_info_t fill_link_info;
	u32 ulen = info->iter.target_name_len;
	const char *target_name;
	u32 target_len;

	/* buffer pointer and length must be both set or both clear */
	if (!ulen ^ !ubuf)
		return -EINVAL;

	target_name = iter_link->tinfo->reg_info->target;
	target_len =  strlen(target_name);
	/* always report the size needed, including the NUL terminator */
	info->iter.target_name_len = target_len + 1;

	if (ubuf) {
		if (ulen >= target_len + 1) {
			if (copy_to_user(ubuf, target_name, target_len + 1))
				return -EFAULT;
		} else {
			char zero = '\0';

			/* short buffer: copy a NUL-terminated prefix and
			 * signal truncation with -ENOSPC
			 */
			if (copy_to_user(ubuf, target_name, ulen - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + ulen - 1))
				return -EFAULT;
			return -ENOSPC;
		}
	}

	fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
	if (fill_link_info)
		return fill_link_info(&iter_link->aux, info);

	return 0;
}
 462
/* bpf_link operations for iterator links; also serves as the identity
 * marker checked by bpf_link_is_iter().
 */
static const struct bpf_link_ops bpf_iter_link_lops = {
	.release = bpf_iter_link_release,
	.dealloc = bpf_iter_link_dealloc,
	.update_prog = bpf_iter_link_replace,
	.show_fdinfo = bpf_iter_link_show_fdinfo,
	.fill_link_info = bpf_iter_link_fill_link_info,
};
 470
/* true iff @link was created by bpf_iter_link_attach() */
bool bpf_link_is_iter(struct bpf_link *link)
{
	return link->ops == &bpf_iter_link_lops;
}
 475
/* BPF_LINK_CREATE handler for iterator progs: validate the user-supplied
 * bpf_iter_link_info, find the registered target matching the prog's
 * attach btf_id, and create/settle the bpf_link.
 */
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	union bpf_iter_link_info __user *ulinfo;
	struct bpf_link_primer link_primer;
	struct bpf_iter_target_info *tinfo;
	union bpf_iter_link_info linfo;
	struct bpf_iter_link *link;
	u32 prog_btf_id, linfo_len;
	bool existed = false;
	int err;

	if (attr->link_create.target_fd || attr->link_create.flags)
		return -EINVAL;

	memset(&linfo, 0, sizeof(union bpf_iter_link_info));

	ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
	linfo_len = attr->link_create.iter_info_len;
	/* pointer and length must be both set or both clear */
	if (!ulinfo ^ !linfo_len)
		return -EINVAL;

	if (ulinfo) {
		/* reject non-zero bytes beyond the kernel's linfo size */
		err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
					       linfo_len);
		if (err)
			return err;
		linfo_len = min_t(u32, linfo_len, sizeof(linfo));
		if (copy_from_user(&linfo, ulinfo, linfo_len))
			return -EFAULT;
	}

	/* the prog's attach btf_id must belong to a registered target */
	prog_btf_id = prog->aux->attach_btf_id;
	mutex_lock(&targets_mutex);
	list_for_each_entry(tinfo, &targets, list) {
		if (tinfo->btf_id == prog_btf_id) {
			existed = true;
			break;
		}
	}
	mutex_unlock(&targets_mutex);
	if (!existed)
		return -ENOENT;

	link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
	if (!link)
		return -ENOMEM;

	bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog);
	link->tinfo = tinfo;

	err  = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		return err;
	}

	if (tinfo->reg_info->attach_target) {
		err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
		if (err) {
			/* release the partially-created (primed) link */
			bpf_link_cleanup(&link_primer);
			return err;
		}
	}

	return bpf_link_settle(&link_primer);
}
 542
 543static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
 544                          struct bpf_iter_target_info *tinfo,
 545                          const struct bpf_iter_seq_info *seq_info,
 546                          struct bpf_prog *prog)
 547{
 548        priv_data->tinfo = tinfo;
 549        priv_data->seq_info = seq_info;
 550        priv_data->prog = prog;
 551        priv_data->session_id = atomic64_inc_return(&session_id);
 552        priv_data->seq_num = 0;
 553        priv_data->done_stop = false;
 554}
 555
/* Set up a seq_file on @file for @link: take a prog reference, allocate the
 * combined private area (iterator header + target-private data), and run the
 * target's init hook. On failure everything acquired so far is unwound.
 */
static int prepare_seq_file(struct file *file, struct bpf_iter_link *link,
			    const struct bpf_iter_seq_info *seq_info)
{
	struct bpf_iter_priv_data *priv_data;
	struct bpf_iter_target_info *tinfo;
	struct bpf_prog *prog;
	u32 total_priv_dsize;
	struct seq_file *seq;
	int err = 0;

	/* grab the prog reference under link_mutex so a concurrent
	 * update_prog cannot swap/put the prog underneath us
	 */
	mutex_lock(&link_mutex);
	prog = link->link.prog;
	bpf_prog_inc(prog);
	mutex_unlock(&link_mutex);

	tinfo = link->tinfo;
	/* private area = bpf_iter_priv_data header + target's own data */
	total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
			   seq_info->seq_priv_size;
	priv_data = __seq_open_private(file, seq_info->seq_ops,
				       total_priv_dsize);
	if (!priv_data) {
		err = -ENOMEM;
		goto release_prog;
	}

	if (seq_info->init_seq_private) {
		err = seq_info->init_seq_private(priv_data->target_private, &link->aux);
		if (err)
			goto release_seq_file;
	}

	init_seq_meta(priv_data, tinfo, seq_info, prog);
	seq = file->private_data;
	/* targets only ever see their own private area, not the header */
	seq->private = priv_data->target_private;

	return 0;

release_seq_file:
	seq_release_private(file->f_inode, file);
	file->private_data = NULL;
release_prog:
	bpf_prog_put(prog);
	return err;
}
 600
 601int bpf_iter_new_fd(struct bpf_link *link)
 602{
 603        struct bpf_iter_link *iter_link;
 604        struct file *file;
 605        unsigned int flags;
 606        int err, fd;
 607
 608        if (link->ops != &bpf_iter_link_lops)
 609                return -EINVAL;
 610
 611        flags = O_RDONLY | O_CLOEXEC;
 612        fd = get_unused_fd_flags(flags);
 613        if (fd < 0)
 614                return fd;
 615
 616        file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags);
 617        if (IS_ERR(file)) {
 618                err = PTR_ERR(file);
 619                goto free_fd;
 620        }
 621
 622        iter_link = container_of(link, struct bpf_iter_link, link);
 623        err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link));
 624        if (err)
 625                goto free_file;
 626
 627        fd_install(fd, file);
 628        return fd;
 629
 630free_file:
 631        fput(file);
 632free_fd:
 633        put_unused_fd(fd);
 634        return err;
 635}
 636
 637struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
 638{
 639        struct bpf_iter_priv_data *iter_priv;
 640        struct seq_file *seq;
 641        void *seq_priv;
 642
 643        seq = meta->seq;
 644        if (seq->file->f_op != &bpf_iter_fops)
 645                return NULL;
 646
 647        seq_priv = seq->private;
 648        iter_priv = container_of(seq_priv, struct bpf_iter_priv_data,
 649                                 target_private);
 650
 651        if (in_stop && iter_priv->done_stop)
 652                return NULL;
 653
 654        meta->session_id = iter_priv->session_id;
 655        meta->seq_num = iter_priv->seq_num;
 656
 657        return iter_priv->prog;
 658}
 659
 660int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
 661{
 662        int ret;
 663
 664        rcu_read_lock();
 665        migrate_disable();
 666        ret = BPF_PROG_RUN(prog, ctx);
 667        migrate_enable();
 668        rcu_read_unlock();
 669
 670        /* bpf program can only return 0 or 1:
 671         *  0 : okay
 672         *  1 : retry the same object
 673         * The bpf_iter_run_prog() return value
 674         * will be seq_ops->show() return value.
 675         */
 676        return ret == 0 ? 0 : -EAGAIN;
 677}
 678
/* Kernel side of the bpf_for_each_map_elem() helper: delegate iteration to
 * the map's own map_for_each_callback implementation.
 */
BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
	   void *, callback_ctx, u64, flags)
{
	return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
}
 684
/* Verifier-visible prototype for the bpf_for_each_map_elem() helper. */
const struct bpf_func_proto bpf_for_each_map_elem_proto = {
	.func		= bpf_for_each_map_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_FUNC,
	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};
 694