linux/drivers/connector/cn_proc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 */

#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>
#include <linux/pid_namespace.h>

#include <linux/cn_proc.h>
#include <linux/local_lock.h>

/*
 * Size of a cn_msg followed by a proc_event structure.  Since the
 * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
 * add one 4-byte word to the size here, and then start the actual
 * cn_msg structure 4 bytes into the stack buffer.  The result is that
 * the immediately following proc_event structure is aligned to 8 bytes.
 */
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)

/* See comment above; we test our assumption about sizeof struct cn_msg here. */
static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
{
        BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
        return (struct cn_msg *)(buffer + 4);
}
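
/*
 * Resulting layout of the __aligned(8) stack buffers used below, given
 * the size check in buffer_to_cn_msg():
 *
 *   buffer +  0: 4 bytes of padding
 *   buffer +  4: struct cn_msg (20 bytes), returned by buffer_to_cn_msg()
 *   buffer + 24: struct proc_event in msg->data, 8-byte aligned
 */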

static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* local_event.count is used as the sequence number of the netlink message */
struct local_event {
        local_lock_t lock;
        __u32 count;
};
static DEFINE_PER_CPU(struct local_event, local_event) = {
        .lock = INIT_LOCAL_LOCK(lock),
};
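
/*
 * Note: because the counter is per CPU, sequence numbers are only
 * monotonic per ev->cpu value; a listener that wants to detect lost
 * events has to track the last seq it saw for each cpu separately.
 */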

static inline void send_msg(struct cn_msg *msg)
{
        local_lock(&local_event.lock);

        msg->seq = __this_cpu_inc_return(local_event.count) - 1;
        ((struct proc_event *)msg->data)->cpu = smp_processor_id();

        /*
         * local_lock() disables preemption during send to ensure the messages
         * are ordered according to their sequence numbers.
         *
         * If cn_netlink_send() fails, the data is not sent.
         */
        cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

        local_unlock(&local_event.lock);
}

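/*
 * proc_fork_connector - report a PROC_EVENT_FORK event
 *
 * Called from copy_process() once the new task has been set up;
 * reports the parent's and the child's pid/tgid.
 */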
void proc_fork_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
        struct task_struct *parent;

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_FORK;
        rcu_read_lock();
        parent = rcu_dereference(task->real_parent);
        ev->event_data.fork.parent_pid = parent->pid;
        ev->event_data.fork.parent_tgid = parent->tgid;
        rcu_read_unlock();
        ev->event_data.fork.child_pid = task->pid;
        ev->event_data.fork.child_tgid = task->tgid;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

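/*
 * proc_exec_connector - report a PROC_EVENT_EXEC event
 *
 * Called from the exec path in fs/exec.c; reports the pid/tgid of the
 * task that is executing a new program.
 */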
void proc_exec_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
        ev->event_data.exec.process_tgid = task->tgid;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

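/*
 * proc_id_connector - report a PROC_EVENT_UID or PROC_EVENT_GID event
 *
 * Called from commit_creds() when a task's credentials change; reports
 * the new real and effective uid or gid, translated into the initial
 * user namespace.
 */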
void proc_id_connector(struct task_struct *task, int which_id)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
        const struct cred *cred;

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->what = which_id;
        ev->event_data.id.process_pid = task->pid;
        ev->event_data.id.process_tgid = task->tgid;
        rcu_read_lock();
        cred = __task_cred(task);
        if (which_id == PROC_EVENT_UID) {
                ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
                ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
        } else if (which_id == PROC_EVENT_GID) {
                ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
                ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
        } else {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();
        ev->timestamp_ns = ktime_get_ns();

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

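/*
 * proc_sid_connector - report a PROC_EVENT_SID event
 *
 * Called when a task becomes a session leader via setsid(); reports
 * the pid/tgid of that task.
 */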
void proc_sid_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_SID;
        ev->event_data.sid.process_pid = task->pid;
        ev->event_data.sid.process_tgid = task->tgid;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

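/*
 * proc_ptrace_connector - report a PROC_EVENT_PTRACE event
 *
 * Called from the ptrace attach and detach paths.  On PTRACE_ATTACH
 * the tracer fields identify the current task; on PTRACE_DETACH they
 * are cleared to 0.
 */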
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_PTRACE;
        ev->event_data.ptrace.process_pid  = task->pid;
        ev->event_data.ptrace.process_tgid = task->tgid;
        if (ptrace_id == PTRACE_ATTACH) {
                ev->event_data.ptrace.tracer_pid  = current->pid;
                ev->event_data.ptrace.tracer_tgid = current->tgid;
        } else if (ptrace_id == PTRACE_DETACH) {
                ev->event_data.ptrace.tracer_pid  = 0;
                ev->event_data.ptrace.tracer_tgid = 0;
        } else
                return;

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

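/*
 * proc_comm_connector - report a PROC_EVENT_COMM event
 *
 * Called when a task's comm string changes, e.g. via
 * prctl(PR_SET_NAME); reports the pid/tgid and the new comm value.
 */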
void proc_comm_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COMM;
        ev->event_data.comm.process_pid  = task->pid;
        ev->event_data.comm.process_tgid = task->tgid;
        get_task_comm(ev->event_data.comm.comm, task);

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

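/*
 * proc_coredump_connector - report a PROC_EVENT_COREDUMP event
 *
 * Called from the core dump path; reports the dumping task's pid/tgid
 * and, while the task is still pid_alive(), its parent's as well.
 */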
void proc_coredump_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        struct task_struct *parent;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COREDUMP;
        ev->event_data.coredump.process_pid = task->pid;
        ev->event_data.coredump.process_tgid = task->tgid;

        rcu_read_lock();
        if (pid_alive(task)) {
                parent = rcu_dereference(task->real_parent);
                ev->event_data.coredump.parent_pid = parent->pid;
                ev->event_data.coredump.parent_tgid = parent->tgid;
        }
        rcu_read_unlock();

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

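/*
 * proc_exit_connector - report a PROC_EVENT_EXIT event
 *
 * Called from the exit path (do_exit()); reports the task's exit code
 * and exit signal, plus its parent's pid/tgid while the task is still
 * pid_alive().
 */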
void proc_exit_connector(struct task_struct *task)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        struct task_struct *parent;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
        ev->event_data.exit.process_tgid = task->tgid;
        ev->event_data.exit.exit_code = task->exit_code;
        ev->event_data.exit.exit_signal = task->exit_signal;

        rcu_read_lock();
        if (pid_alive(task)) {
                parent = rcu_dereference(task->real_parent);
                ev->event_data.exit.parent_pid = parent->pid;
                ev->event_data.exit.parent_tgid = parent->tgid;
        }
        rcu_read_unlock();

        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
        struct cn_msg *msg;
        struct proc_event *ev;
        __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

        if (atomic_read(&proc_event_num_listeners) < 1)
                return;

        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
        msg->seq = rcvd_seq;
        ev->timestamp_ns = ktime_get_ns();
        ev->cpu = -1;
        ev->what = PROC_EVENT_NONE;
        ev->event_data.ack.err = err;
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
        send_msg(msg);
}

/**
 * cn_proc_mcast_ctl - handle subscribe/unsubscribe requests from userspace
 * @msg: message sent from userspace via the connector
 * @nsp: NETLINK_CB of the client's socket buffer
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
                              struct netlink_skb_parms *nsp)
{
        enum proc_cn_mcast_op *mc_op = NULL;
        int err = 0;

        if (msg->len != sizeof(*mc_op))
                return;

        /*
         * Events are reported with respect to the initial pid
         * and user namespaces so ignore requestors from
         * other namespaces.
         */
        if ((current_user_ns() != &init_user_ns) ||
            (task_active_pid_ns(current) != &init_pid_ns))
                return;

        /* Can only change if privileged. */
        if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
                err = EPERM;
                goto out;
        }

        mc_op = (enum proc_cn_mcast_op *)msg->data;
        switch (*mc_op) {
        case PROC_CN_MCAST_LISTEN:
                atomic_inc(&proc_event_num_listeners);
                break;
        case PROC_CN_MCAST_IGNORE:
                atomic_dec(&proc_event_num_listeners);
                break;
        default:
                err = EINVAL;
                break;
        }

out:
        cn_proc_ack(err, msg->seq, msg->ack);
}

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
        int err = cn_add_callback(&cn_proc_event_id,
                                  "cn_proc",
                                  &cn_proc_mcast_ctl);
        if (err) {
                pr_warn("cn_proc failed to register\n");
                return err;
        }
        return 0;
}
device_initcall(cn_proc_init);
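
For reference, a minimal userspace subscriber might look like the sketch below. It is untested and not part of this file, and the file name proc_listen.c is hypothetical; it assumes the uapi headers providing struct cn_msg, struct proc_event and PROC_CN_MCAST_LISTEN are installed. Per cn_proc_mcast_ctl() above, the request is only honoured for a CAP_NET_ADMIN-capable task in the initial pid and user namespaces.

/* proc_listen.c - subscribe to proc events and print one incoming message */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

int main(void)
{
        /* Large enough for nlmsghdr + cn_msg + proc_event on the receive side. */
        char buf[NLMSG_SPACE(sizeof(struct cn_msg) + sizeof(struct proc_event))];
        struct sockaddr_nl sa = {
                .nl_family = AF_NETLINK,
                .nl_groups = CN_IDX_PROC,       /* multicast group of this driver */
                .nl_pid    = getpid(),
        };
        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
        struct cn_msg *msg = (struct cn_msg *)NLMSG_DATA(nlh);
        enum proc_cn_mcast_op op = PROC_CN_MCAST_LISTEN;
        int sk;

        sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
        if (sk < 0 || bind(sk, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
                perror("netlink");
                return 1;
        }

        /* cn_proc_mcast_ctl() expects msg->len == sizeof(enum proc_cn_mcast_op). */
        memset(buf, 0, sizeof(buf));
        nlh->nlmsg_len  = NLMSG_LENGTH(sizeof(struct cn_msg) + sizeof(op));
        nlh->nlmsg_type = NLMSG_DONE;
        nlh->nlmsg_pid  = getpid();
        msg->id.idx = CN_IDX_PROC;
        msg->id.val = CN_VAL_PROC;
        msg->len = sizeof(op);
        memcpy(msg->data, &op, sizeof(op));

        if (send(sk, nlh, nlh->nlmsg_len, 0) < 0) {
                perror("send");
                return 1;
        }

        /* First message back is normally the PROC_EVENT_NONE ack from cn_proc_ack(). */
        if (recv(sk, buf, sizeof(buf), 0) > 0) {
                struct proc_event *ev = (struct proc_event *)msg->data;

                printf("what=%d cpu=%u seq=%u\n", ev->what, ev->cpu, msg->seq);
        }
        close(sk);
        return 0;
}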