linux/drivers/media/rc/bpf-lirc.c
// SPDX-License-Identifier: GPL-2.0
// bpf-lirc.c - handles bpf programs attached to lirc devices
//
// Copyright (C) 2018 Sean Young <sean@mess.org>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf_lirc.h>
#include "rc-core-priv.h"

/*
 * BPF interface for raw IR
 */
const struct bpf_prog_ops lirc_mode2_prog_ops = {
};

BPF_CALL_1(bpf_rc_repeat, u32*, sample)
{
        struct ir_raw_event_ctrl *ctrl;

        ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

        rc_repeat(ctrl->dev);

        return 0;
}

static const struct bpf_func_proto rc_repeat_proto = {
        .func      = bpf_rc_repeat,
        .gpl_only  = true, /* rc_repeat is EXPORT_SYMBOL_GPL */
        .ret_type  = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};

/*
 * Currently rc-core does not support 64-bit scancodes, but there are many
 * known protocols with more than 32 bits, so define the interface as u64
 * to be future-proof.
 */
BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
           u32, toggle)
{
        struct ir_raw_event_ctrl *ctrl;

        ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

        rc_keydown(ctrl->dev, protocol, scancode, toggle != 0);

        return 0;
}

static const struct bpf_func_proto rc_keydown_proto = {
        .func      = bpf_rc_keydown,
        .gpl_only  = true, /* rc_keydown is EXPORT_SYMBOL_GPL */
        .ret_type  = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
};
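
/*
 * A sketch (not part of this file) of how a BPF_PROG_TYPE_LIRC_MODE2 program
 * might call the two helpers above from restricted C. The context is a
 * pointer to the single u32 raw IR sample. The SEC() macro and the helper
 * declarations are spelled out here instead of relying on a particular
 * bpf_helpers.h, and the duration thresholds and scancode layout are invented
 * for the example; the LIRC_* macros and RC_PROTO_UNKNOWN come from
 * <linux/lirc.h>.
 *
 *      #include <linux/bpf.h>
 *      #include <linux/lirc.h>
 *
 *      #define SEC(name) __attribute__((section(name), used))
 *
 *      static int (*bpf_rc_keydown)(void *ctx, __u32 protocol, __u64 scancode,
 *                                   __u32 toggle) = (void *)BPF_FUNC_rc_keydown;
 *      static int (*bpf_rc_repeat)(void *ctx) = (void *)BPF_FUNC_rc_repeat;
 *
 *      SEC("lirc_mode2")
 *      int decoder(unsigned int *sample)
 *      {
 *              if (LIRC_IS_PULSE(*sample)) {
 *                      unsigned int duration = LIRC_VALUE(*sample);
 *
 *                      if (duration > 20000)
 *                              // report a (made-up) scancode on a long pulse
 *                              bpf_rc_keydown(sample, RC_PROTO_UNKNOWN,
 *                                             duration & 0xff, 0);
 *                      else if (duration > 10000)
 *                              // treat a medium pulse as a key repeat
 *                              bpf_rc_repeat(sample);
 *              }
 *
 *              return 0;
 *      }
 *
 *      // both helpers are gpl_only, so the program must be GPL licensed
 *      char _license[] SEC("license") = "GPL";
 */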

static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_rc_repeat:
                return &rc_repeat_proto;
        case BPF_FUNC_rc_keydown:
                return &rc_keydown_proto;
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_trace_printk:
                if (capable(CAP_SYS_ADMIN))
                        return bpf_get_trace_printk_proto();
                /* fall through */
        default:
                return NULL;
        }
}

static bool lirc_mode2_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        /* We have one field of u32 */
        return type == BPF_READ && off == 0 && size == sizeof(u32);
}

const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
        .get_func_proto  = lirc_mode2_func_proto,
        .is_valid_access = lirc_mode2_is_valid_access
};
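
/*
 * For illustration: the check above means the only context access the
 * verifier accepts from a lirc_mode2 program is a 4-byte read at offset 0,
 * i.e. loading the sample itself:
 *
 *      unsigned int v = *ctx;                          // accepted: read, off 0, 4 bytes
 *      unsigned long long w = *(unsigned long long *)ctx;  // rejected: wrong size
 *      *ctx = 0;                                       // rejected: not a read
 */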

#define BPF_MAX_PROGS 64

static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        struct ir_raw_event_ctrl *raw;
        int ret;

        if (rcdev->driver_type != RC_DRIVER_IR_RAW)
                return -EINVAL;

        ret = mutex_lock_interruptible(&ir_raw_handler_lock);
        if (ret)
                return ret;

        raw = rcdev->raw;
        if (!raw) {
                ret = -ENODEV;
                goto unlock;
        }

        if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) {
                ret = -E2BIG;
                goto unlock;
        }

        old_array = raw->progs;
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;

        rcu_assign_pointer(raw->progs, new_array);
        bpf_prog_array_free(old_array);

unlock:
        mutex_unlock(&ir_raw_handler_lock);
        return ret;
}

static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        struct ir_raw_event_ctrl *raw;
        int ret;

        if (rcdev->driver_type != RC_DRIVER_IR_RAW)
                return -EINVAL;

        ret = mutex_lock_interruptible(&ir_raw_handler_lock);
        if (ret)
                return ret;

        raw = rcdev->raw;
        if (!raw) {
                ret = -ENODEV;
                goto unlock;
        }

        old_array = raw->progs;
        ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array);
        /*
         * Do not use bpf_prog_array_delete_safe() as we would end up
         * with a dummy entry in the array, and then we would free the
         * dummy in lirc_bpf_free()
         */
        if (ret)
                goto unlock;

        rcu_assign_pointer(raw->progs, new_array);
        bpf_prog_array_free(old_array);
        bpf_prog_put(prog);
unlock:
        mutex_unlock(&ir_raw_handler_lock);
        return ret;
}

void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
{
        struct ir_raw_event_ctrl *raw = rcdev->raw;

        raw->bpf_sample = sample;

        if (raw->progs)
                BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, BPF_PROG_RUN);
}
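
/*
 * A note on the sample format (illustrative): the u32 passed in here is the
 * packed LIRC mode2 word that the lirc chardev also exposes, with the mode in
 * the top byte and the duration in microseconds (or the carrier frequency) in
 * the low 24 bits. A program would typically take it apart with the macros
 * from <linux/lirc.h>:
 *
 *      switch (LIRC_MODE2(*sample)) {
 *      case LIRC_MODE2_PULSE:
 *      case LIRC_MODE2_SPACE:
 *              duration = LIRC_VALUE(*sample); // microseconds
 *              break;
 *      case LIRC_MODE2_TIMEOUT:
 *              // end of transmission, reset any decoder state
 *              break;
 *      }
 */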

/*
 * This should be called once the rc thread has been stopped, so there can be
 * no concurrent bpf execution.
 */
void lirc_bpf_free(struct rc_dev *rcdev)
{
        struct bpf_prog **progs;

        if (!rcdev->raw->progs)
                return;

        progs = rcu_dereference(rcdev->raw->progs)->progs;
        while (*progs)
                bpf_prog_put(*progs++);

        bpf_prog_array_free(rcdev->raw->progs);
}

int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct rc_dev *rcdev;
        int ret;

        if (attr->attach_flags)
                return -EINVAL;

        rcdev = rc_dev_get_from_fd(attr->target_fd);
        if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);

        ret = lirc_bpf_attach(rcdev, prog);

        put_device(&rcdev->dev);

        return ret;
}
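
/*
 * Userspace side of the attach path, as a sketch assuming libbpf: the program
 * is attached with the plain BPF_PROG_ATTACH command, where target_fd is an
 * open lirc chardev instead of a cgroup. The device path and the helper
 * function name are placeholders.
 *
 *      #include <bpf/bpf.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      // prog_fd: a loaded BPF_PROG_TYPE_LIRC_MODE2 program
 *      static int attach_to_lirc(int prog_fd)
 *      {
 *              int lirc_fd = open("/dev/lirc0", O_RDWR);
 *
 *              if (lirc_fd < 0)
 *                      return -1;
 *
 *              // attach_flags must be 0; fails with E2BIG once 64 programs
 *              // are already attached
 *              if (bpf_prog_attach(prog_fd, lirc_fd, BPF_LIRC_MODE2, 0)) {
 *                      close(lirc_fd);
 *                      return -1;
 *              }
 *
 *              // the attachment lives on the rc device, not on this fd
 *              return lirc_fd;
 *      }
 */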

int lirc_prog_detach(const union bpf_attr *attr)
{
        struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;

        if (attr->attach_flags)
                return -EINVAL;

        prog = bpf_prog_get_type(attr->attach_bpf_fd,
                                 BPF_PROG_TYPE_LIRC_MODE2);
        if (IS_ERR(prog))
                return PTR_ERR(prog);

        rcdev = rc_dev_get_from_fd(attr->target_fd);
        if (IS_ERR(rcdev)) {
                bpf_prog_put(prog);
                return PTR_ERR(rcdev);
        }

        ret = lirc_bpf_detach(rcdev, prog);

        bpf_prog_put(prog);
        put_device(&rcdev->dev);

        return ret;
}
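
/*
 * The matching userspace detach (sketch, assuming libbpf): the caller must
 * pass the fd of the very program that was attached, since the detach path
 * above removes that specific entry from the array.
 *
 *      int err = bpf_prog_detach2(prog_fd, lirc_fd, BPF_LIRC_MODE2);
 */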

int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
        struct bpf_prog_array __rcu *progs;
        struct rc_dev *rcdev;
        u32 cnt, flags = 0;
        int ret;

        if (attr->query.query_flags)
                return -EINVAL;

        rcdev = rc_dev_get_from_fd(attr->query.target_fd);
        if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);

        if (rcdev->driver_type != RC_DRIVER_IR_RAW) {
                ret = -EINVAL;
                goto put;
        }

        ret = mutex_lock_interruptible(&ir_raw_handler_lock);
        if (ret)
                goto put;

        progs = rcdev->raw->progs;
        cnt = progs ? bpf_prog_array_length(progs) : 0;

        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
                ret = -EFAULT;
                goto unlock;
        }

        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) {
                ret = -EFAULT;
                goto unlock;
        }

        if (attr->query.prog_cnt != 0 && prog_ids && cnt)
                ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);

unlock:
        mutex_unlock(&ir_raw_handler_lock);
put:
        put_device(&rcdev->dev);

        return ret;
}
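
/*
 * Querying the attached programs from userspace (sketch, assuming libbpf).
 * The id buffer is sized to BPF_MAX_PROGS so it can always hold the full
 * list; prog_cnt is updated with the number of attached programs.
 *
 *      __u32 prog_ids[64], prog_cnt = 64, attach_flags;
 *      int err;
 *
 *      err = bpf_prog_query(lirc_fd, BPF_LIRC_MODE2, 0, &attach_flags,
 *                           prog_ids, &prog_cnt);
 *      // on success, prog_ids[0..prog_cnt-1] hold program ids that can be
 *      // turned back into fds with bpf_prog_get_fd_by_id()
 */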