linux/kernel/bpf/helpers.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>

/* If a kernel subsystem allows eBPF programs to call this function, it
 * should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback, so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their
 * lookup/update/delete map methods, therefore eBPF programs must run
 * under the RCU read lock if they are allowed to access maps; hence
 * rcu_read_lock_held() is checked in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
        .func           = bpf_map_lookup_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
};
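
/* Illustrative sketch only: a subsystem that lets its eBPF programs use
 * the map helpers in this file returns these protos from its
 * get_func_proto() callback, roughly as in the hypothetical example
 * below (the function name is made up; real callers live in their
 * respective subsystems).
 */
static const struct bpf_func_proto *
example_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        default:
                return NULL;
        }
}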

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
           void *, value, u64, flags)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
        .func           = bpf_map_update_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
        .arg3_type      = ARG_PTR_TO_MAP_VALUE,
        .arg4_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
        .func           = bpf_map_delete_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_KEY,
};

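/* The push/pop/peek helpers below are backed by map implementations
 * that provide the map_push_elem/map_pop_elem/map_peek_elem ops, such
 * as the queue and stack map types (BPF_MAP_TYPE_QUEUE,
 * BPF_MAP_TYPE_STACK).
 */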
BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
        return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
        .func           = bpf_map_push_elem,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_MAP_VALUE,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
        return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
        .func           = bpf_map_pop_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
        return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
        .func           = bpf_map_peek_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

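/* bpf_user_rnd_u32() is implemented in kernel/bpf/core.c and draws from
 * a dedicated per-cpu prandom state, so programs calling this helper do
 * not disturb the state used by other prandom users in the kernel.
 */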
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
        .func           = bpf_user_rnd_u32,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
        return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
        .func           = bpf_get_smp_processor_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
        return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
        .func           = bpf_get_numa_node_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
        /* NMI safe access to clock monotonic */
        return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
        .func           = bpf_ktime_get_ns,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
        struct task_struct *task = current;

        if (unlikely(!task))
                return -EINVAL;

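        /* Pack the thread group id (the user-space "process" PID) into
         * the upper 32 bits and the thread id into the lower 32 bits.
         */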
        return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
        .func           = bpf_get_current_pid_tgid,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
        struct task_struct *task = current;
        kuid_t uid;
        kgid_t gid;

        if (unlikely(!task))
                return -EINVAL;

        current_uid_gid(&uid, &gid);
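        /* gid is packed into the upper 32 bits, uid into the lower 32
         * bits; both are translated into the initial user namespace.
         */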
        return (u64) from_kgid(&init_user_ns, gid) << 32 |
                     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
        .func           = bpf_get_current_uid_gid,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
        struct task_struct *task = current;

        if (unlikely(!task))
                goto err_clear;

        strncpy(buf, task->comm, size);

        /* Verifier guarantees that size > 0. For task->comm exceeding
         * size, guarantee that buf is %NUL-terminated. Unconditionally
         * done here to save the size test.
         */
        buf[size - 1] = 0;
        return 0;
err_clear:
        memset(buf, 0, size);
        return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
        .func           = bpf_get_current_comm,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE,
};

#ifdef CONFIG_CGROUPS
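/* Returns the 64-bit id of the cgroup the current task belongs to on
 * the default (cgroup v2) hierarchy.
 */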
BPF_CALL_0(bpf_get_current_cgroup_id)
{
        struct cgroup *cgrp = task_dfl_cgroup(current);

        return cgrp->kn->id.id;
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
        .func           = bpf_get_current_cgroup_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
                bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
        /* flags argument is not used now,
         * but provides an ability to extend the API.
         * verifier checks that its value is correct.
         */
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
        struct bpf_cgroup_storage *storage;
        void *ptr;

        storage = this_cpu_read(bpf_cgroup_storage[stype]);

        if (stype == BPF_CGROUP_STORAGE_SHARED)
                ptr = &READ_ONCE(storage->buf)->data[0];
        else
                ptr = this_cpu_ptr(storage->percpu_buf);

        return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
        .func           = bpf_get_local_storage,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_MAP_VALUE,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
#endif
#endif