linux/kernel/bpf/helpers.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their map methods
 * lookup/update/delete, therefore eBPF programs must run under the RCU
 * read lock if a program is allowed to access maps, so check
 * rcu_read_lock_held() in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
        .func           = bpf_map_lookup_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
};

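/* Illustrative sketch (not an in-tree subsystem): the contract described in
 * the comment above is usually met with a ->get_func_proto() callback in a
 * subsystem's struct bpf_verifier_ops, e.g.:
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		case BPF_FUNC_map_delete_elem:
 *			return &bpf_map_delete_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 *
 * Returning NULL makes the verifier reject programs that call helpers this
 * subsystem did not opt into.
 */
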
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
           void *, value, u64, flags)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
        .func           = bpf_map_update_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
        .arg3_type      = ARG_PTR_TO_MAP_VALUE,
        .arg4_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
        .func           = bpf_map_delete_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
        return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
        .func           = bpf_map_push_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_VALUE,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
        return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
        .func           = bpf_map_pop_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
        return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
        .func           = bpf_map_peek_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

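/* Usage sketch (BPF program side, illustrative only): the push/pop/peek
 * helpers above back BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK, which are
 * keyless. Assuming the usual libbpf wrappers, a program might do:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_QUEUE);
 *		__uint(max_entries, 64);
 *		__type(value, __u64);
 *	} q SEC(".maps");
 *
 *	__u64 v = 42;
 *	bpf_map_push_elem(&q, &v, BPF_ANY);	// enqueue
 *	if (!bpf_map_peek_elem(&q, &v))		// read the head in place
 *		bpf_map_pop_elem(&q, &v);	// dequeue it into v
 *
 * All three return 0 on success and a negative errno otherwise.
 */
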
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
        .func           = bpf_user_rnd_u32,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
        return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
        .func           = bpf_get_smp_processor_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
        return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
        .func           = bpf_get_numa_node_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
        /* NMI safe access to clock monotonic */
        return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
        .func           = bpf_ktime_get_ns,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
        struct task_struct *task = current;

        if (unlikely(!task))
                return -EINVAL;

        return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
        .func           = bpf_get_current_pid_tgid,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
        struct task_struct *task = current;
        kuid_t uid;
        kgid_t gid;

        if (unlikely(!task))
                return -EINVAL;

        current_uid_gid(&uid, &gid);
        return (u64) from_kgid(&init_user_ns, gid) << 32 |
                     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
        .func           = bpf_get_current_uid_gid,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

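/* The two helpers above each pack a pair of 32-bit IDs into one u64:
 * bpf_get_current_pid_tgid() puts tgid in the upper half and pid in the
 * lower half, bpf_get_current_uid_gid() likewise returns gid over uid.
 * Typical BPF-program-side unpacking (illustrative only):
 *
 *	__u64 pid_tgid = bpf_get_current_pid_tgid();
 *	__u32 tgid = pid_tgid >> 32;	// userspace "process ID"
 *	__u32 pid  = (__u32)pid_tgid;	// kernel task ID, userspace "thread ID"
 */
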
BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
        struct task_struct *task = current;

        if (unlikely(!task))
                goto err_clear;

        strncpy(buf, task->comm, size);

        /* Verifier guarantees that size > 0. For task->comm exceeding
         * size, guarantee that buf is %NUL-terminated. Unconditionally
         * done here to save the size test.
         */
        buf[size - 1] = 0;
        return 0;
err_clear:
        memset(buf, 0, size);
        return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
        .func           = bpf_get_current_comm,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE,
};

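/* Usage sketch for bpf_get_current_comm() (BPF program side, illustrative
 * only): the destination must be a buffer whose size the verifier can prove,
 * and the helper zero-terminates it (or zero-fills it on failure) as noted
 * above.
 *
 *	char comm[16];			// TASK_COMM_LEN
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		bpf_printk("running in %s\n", comm);
 */
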
#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
        arch_spinlock_t *l = (void *)lock;
        union {
                __u32 val;
                arch_spinlock_t lock;
        } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

        compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
        BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
        BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
        arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
        arch_spinlock_t *l = (void *)lock;

        arch_spin_unlock(l);
}

#else

/* Generic fallback: a simple test-and-set spinlock built on an atomic_t.
 * Spin until the word reads 0, then try to claim it with an exchange of 1.
 */
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
        atomic_t *l = (void *)lock;

        BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
        do {
                atomic_cond_read_relaxed(l, !VAL);
        } while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
        atomic_t *l = (void *)lock;

        atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
        unsigned long flags;

        local_irq_save(flags);
        __bpf_spin_lock(lock);
        __this_cpu_write(irqsave_flags, flags);
        return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
        .func           = bpf_spin_lock,
        .gpl_only       = false,
        .ret_type       = RET_VOID,
        .arg1_type      = ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
        unsigned long flags;

        flags = __this_cpu_read(irqsave_flags);
        __bpf_spin_unlock(lock);
        local_irq_restore(flags);
        return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
        .func           = bpf_spin_unlock,
        .gpl_only       = false,
        .ret_type       = RET_VOID,
        .arg1_type      = ARG_PTR_TO_SPIN_LOCK,
};

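/* Usage sketch (BPF program side, illustrative only): bpf_spin_lock() takes
 * a pointer to a struct bpf_spin_lock embedded in a map value, so a program
 * typically looks the value up first and briefly holds the lock while
 * updating adjacent fields:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 counter;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->counter++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 *
 * The verifier enforces that the lock is released on every path and that no
 * other helper is called while it is held.
 */
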
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
                           bool lock_src)
{
        struct bpf_spin_lock *lock;

        if (lock_src)
                lock = src + map->spin_lock_off;
        else
                lock = dst + map->spin_lock_off;
        preempt_disable();
        ____bpf_spin_lock(lock);
        copy_map_value(map, dst, src);
        ____bpf_spin_unlock(lock);
        preempt_enable();
}

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
        struct cgroup *cgrp = task_dfl_cgroup(current);

        return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
        .func           = bpf_get_current_cgroup_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
                bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
        /* The flags argument is not used now, but provides the ability
         * to extend the API. The verifier checks that its value is
         * correct.
         */
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
        struct bpf_cgroup_storage *storage;
        void *ptr;

        storage = this_cpu_read(bpf_cgroup_storage[stype]);

        if (stype == BPF_CGROUP_STORAGE_SHARED)
                ptr = &READ_ONCE(storage->buf)->data[0];
        else
                ptr = this_cpu_ptr(storage->percpu_buf);

        return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
        .func           = bpf_get_local_storage,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_MAP_VALUE,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
#endif

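/* Usage sketch (BPF program side, illustrative only): bpf_get_local_storage()
 * hands back a pointer into a BPF_MAP_TYPE_CGROUP_STORAGE (or the per-CPU
 * variant) value for the cgroup the program is attached to; flags must be 0:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
 *		__type(key, struct bpf_cgroup_storage_key);
 *		__type(value, __u64);
 *	} storage SEC(".maps");
 *
 *	__u64 *cnt = bpf_get_local_storage(&storage, 0);
 *	__sync_fetch_and_add(cnt, 1);
 *
 * The return type is RET_PTR_TO_MAP_VALUE, so no NULL check is required.
 */
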
/* The lower bits of the flags argument carry the numeric base; only
 * bases 0 (auto-detect), 8, 10 and 16 are accepted below.
 */
#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
                          unsigned long long *res, bool *is_negative)
{
        unsigned int base = flags & BPF_STRTOX_BASE_MASK;
        const char *cur_buf = buf;
        size_t cur_len = buf_len;
        unsigned int consumed;
        size_t val_len;
        char str[64];

        if (!buf || !buf_len || !res || !is_negative)
                return -EINVAL;

        if (base != 0 && base != 8 && base != 10 && base != 16)
                return -EINVAL;

        if (flags & ~BPF_STRTOX_BASE_MASK)
                return -EINVAL;

        while (cur_buf < buf + buf_len && isspace(*cur_buf))
                ++cur_buf;

        *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
        if (*is_negative)
                ++cur_buf;

        consumed = cur_buf - buf;
        cur_len -= consumed;
        if (!cur_len)
                return -EINVAL;

        cur_len = min(cur_len, sizeof(str) - 1);
        memcpy(str, cur_buf, cur_len);
        str[cur_len] = '\0';
        cur_buf = str;

        cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
        val_len = _parse_integer(cur_buf, base, res);

        if (val_len & KSTRTOX_OVERFLOW)
                return -ERANGE;

        if (val_len == 0)
                return -EINVAL;

        cur_buf += val_len;
        consumed += cur_buf - str;

        return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
                         long long *res)
{
        unsigned long long _res;
        bool is_negative;
        int err;

        err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
        if (err < 0)
                return err;
        if (is_negative) {
                if ((long long)-_res > 0)
                        return -ERANGE;
                *res = -_res;
        } else {
                if ((long long)_res < 0)
                        return -ERANGE;
                *res = _res;
        }
        return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
           long *, res)
{
        long long _res;
        int err;

        err = __bpf_strtoll(buf, buf_len, flags, &_res);
        if (err < 0)
                return err;
        if (_res != (long)_res)
                return -ERANGE;
        *res = _res;
        return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
        .func           = bpf_strtol,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM,
        .arg2_type      = ARG_CONST_SIZE,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_LONG,
};

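/* Worked example (illustrative only): on success bpf_strtol() returns the
 * number of characters consumed, not 0, so callers can step through a
 * buffer. With the parsing rules of __bpf_strtoull() above:
 *
 *	char buf[] = "  -0x1f";
 *	long val;
 *	int ret;
 *
 *	ret = bpf_strtol(buf, sizeof(buf) - 1, 0, &val);
 *	// ret == 7: whitespace, sign and digits all consumed
 *	// val == -31; flags carry the base, 0 means auto-detect (0x => hex)
 */
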
BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
           unsigned long *, res)
{
        unsigned long long _res;
        bool is_negative;
        int err;

        err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
        if (err < 0)
                return err;
        if (is_negative)
                return -EINVAL;
        if (_res != (unsigned long)_res)
                return -ERANGE;
        *res = _res;
        return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
        .func           = bpf_strtoul,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM,
        .arg2_type      = ARG_CONST_SIZE,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_LONG,
};
#endif /* CONFIG_CGROUPS */