1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/kernel.h>
26#include <linux/ktime.h>
27#include <linux/init.h>
28#include <linux/connector.h>
29#include <linux/gfp.h>
30#include <linux/ptrace.h>
31#include <linux/atomic.h>
32#include <linux/pid_namespace.h>
33
34#include <linux/cn_proc.h>
35#include <linux/local_lock.h>
36
37
38
39
40
41
42
43
/*
 * On-stack scratch-buffer size: connector header + event payload, plus
 * 4 bytes of slack that buffer_to_cn_msg() uses to realign the payload.
 */
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)

/*
 * Place the cn_msg header inside an 8-byte-aligned scratch buffer so that
 * the proc_event payload following the header is itself 8-byte aligned:
 * all callers declare the buffer __aligned(8), and 8 + 20 + 4 ≡ 0 (mod 8).
 */
static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
{
	/* The +4 offset below is only valid for a 20-byte cn_msg header. */
	BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
	return (struct cn_msg *)(buffer + 4);
}
52
/* Count of PROC_CN_MCAST_LISTEN subscribers; events are only built when > 0. */
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
/* Connector (idx, val) identity this module registers and sends under. */
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* Per-CPU message sequence counter, serialized by a local lock (see send_msg()). */
struct local_event {
	local_lock_t lock;
	__u32 count;	/* next sequence number to hand out on this CPU */
};
static DEFINE_PER_CPU(struct local_event, local_event) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
64
/*
 * send_msg - stamp @msg with a per-CPU sequence number and CPU id, then
 * multicast it to CN_IDX_PROC listeners.
 *
 * Called by every event builder below; @msg must point at a fully filled
 * cn_msg (buffer_to_cn_msg() layout) whose data is a struct proc_event.
 * Note that msg->seq and the event's cpu field are (re)written here,
 * overriding whatever the caller stored in them.
 */
static inline void send_msg(struct cn_msg *msg)
{
	local_lock(&local_event.lock);

	/* Post-increment semantics: first message on a CPU gets seq 0. */
	msg->seq = __this_cpu_inc_return(local_event.count) - 1;
	((struct proc_event *)msg->data)->cpu = smp_processor_id();

	/*
	 * Holding the local lock (which disables preemption) across the
	 * send keeps this CPU's messages on the wire in sequence-number
	 * order.  NOTE(review): this orders nothing across CPUs.
	 *
	 * GFP_NOWAIT: event paths must not sleep; a failed send drops
	 * the event.
	 */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

	local_unlock(&local_event.lock);
}
82
83void proc_fork_connector(struct task_struct *task)
84{
85 struct cn_msg *msg;
86 struct proc_event *ev;
87 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
88 struct task_struct *parent;
89
90 if (atomic_read(&proc_event_num_listeners) < 1)
91 return;
92
93 msg = buffer_to_cn_msg(buffer);
94 ev = (struct proc_event *)msg->data;
95 memset(&ev->event_data, 0, sizeof(ev->event_data));
96 ev->timestamp_ns = ktime_get_ns();
97 ev->what = PROC_EVENT_FORK;
98 rcu_read_lock();
99 parent = rcu_dereference(task->real_parent);
100 ev->event_data.fork.parent_pid = parent->pid;
101 ev->event_data.fork.parent_tgid = parent->tgid;
102 rcu_read_unlock();
103 ev->event_data.fork.child_pid = task->pid;
104 ev->event_data.fork.child_tgid = task->tgid;
105
106 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
107 msg->ack = 0;
108 msg->len = sizeof(*ev);
109 msg->flags = 0;
110 send_msg(msg);
111}
112
113void proc_exec_connector(struct task_struct *task)
114{
115 struct cn_msg *msg;
116 struct proc_event *ev;
117 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
118
119 if (atomic_read(&proc_event_num_listeners) < 1)
120 return;
121
122 msg = buffer_to_cn_msg(buffer);
123 ev = (struct proc_event *)msg->data;
124 memset(&ev->event_data, 0, sizeof(ev->event_data));
125 ev->timestamp_ns = ktime_get_ns();
126 ev->what = PROC_EVENT_EXEC;
127 ev->event_data.exec.process_pid = task->pid;
128 ev->event_data.exec.process_tgid = task->tgid;
129
130 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
131 msg->ack = 0;
132 msg->len = sizeof(*ev);
133 msg->flags = 0;
134 send_msg(msg);
135}
136
137void proc_id_connector(struct task_struct *task, int which_id)
138{
139 struct cn_msg *msg;
140 struct proc_event *ev;
141 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
142 const struct cred *cred;
143
144 if (atomic_read(&proc_event_num_listeners) < 1)
145 return;
146
147 msg = buffer_to_cn_msg(buffer);
148 ev = (struct proc_event *)msg->data;
149 memset(&ev->event_data, 0, sizeof(ev->event_data));
150 ev->what = which_id;
151 ev->event_data.id.process_pid = task->pid;
152 ev->event_data.id.process_tgid = task->tgid;
153 rcu_read_lock();
154 cred = __task_cred(task);
155 if (which_id == PROC_EVENT_UID) {
156 ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
157 ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
158 } else if (which_id == PROC_EVENT_GID) {
159 ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
160 ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
161 } else {
162 rcu_read_unlock();
163 return;
164 }
165 rcu_read_unlock();
166 ev->timestamp_ns = ktime_get_ns();
167
168 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
169 msg->ack = 0;
170 msg->len = sizeof(*ev);
171 msg->flags = 0;
172 send_msg(msg);
173}
174
175void proc_sid_connector(struct task_struct *task)
176{
177 struct cn_msg *msg;
178 struct proc_event *ev;
179 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
180
181 if (atomic_read(&proc_event_num_listeners) < 1)
182 return;
183
184 msg = buffer_to_cn_msg(buffer);
185 ev = (struct proc_event *)msg->data;
186 memset(&ev->event_data, 0, sizeof(ev->event_data));
187 ev->timestamp_ns = ktime_get_ns();
188 ev->what = PROC_EVENT_SID;
189 ev->event_data.sid.process_pid = task->pid;
190 ev->event_data.sid.process_tgid = task->tgid;
191
192 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
193 msg->ack = 0;
194 msg->len = sizeof(*ev);
195 msg->flags = 0;
196 send_msg(msg);
197}
198
199void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
200{
201 struct cn_msg *msg;
202 struct proc_event *ev;
203 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
204
205 if (atomic_read(&proc_event_num_listeners) < 1)
206 return;
207
208 msg = buffer_to_cn_msg(buffer);
209 ev = (struct proc_event *)msg->data;
210 memset(&ev->event_data, 0, sizeof(ev->event_data));
211 ev->timestamp_ns = ktime_get_ns();
212 ev->what = PROC_EVENT_PTRACE;
213 ev->event_data.ptrace.process_pid = task->pid;
214 ev->event_data.ptrace.process_tgid = task->tgid;
215 if (ptrace_id == PTRACE_ATTACH) {
216 ev->event_data.ptrace.tracer_pid = current->pid;
217 ev->event_data.ptrace.tracer_tgid = current->tgid;
218 } else if (ptrace_id == PTRACE_DETACH) {
219 ev->event_data.ptrace.tracer_pid = 0;
220 ev->event_data.ptrace.tracer_tgid = 0;
221 } else
222 return;
223
224 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
225 msg->ack = 0;
226 msg->len = sizeof(*ev);
227 msg->flags = 0;
228 send_msg(msg);
229}
230
231void proc_comm_connector(struct task_struct *task)
232{
233 struct cn_msg *msg;
234 struct proc_event *ev;
235 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
236
237 if (atomic_read(&proc_event_num_listeners) < 1)
238 return;
239
240 msg = buffer_to_cn_msg(buffer);
241 ev = (struct proc_event *)msg->data;
242 memset(&ev->event_data, 0, sizeof(ev->event_data));
243 ev->timestamp_ns = ktime_get_ns();
244 ev->what = PROC_EVENT_COMM;
245 ev->event_data.comm.process_pid = task->pid;
246 ev->event_data.comm.process_tgid = task->tgid;
247 get_task_comm(ev->event_data.comm.comm, task);
248
249 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
250 msg->ack = 0;
251 msg->len = sizeof(*ev);
252 msg->flags = 0;
253 send_msg(msg);
254}
255
256void proc_coredump_connector(struct task_struct *task)
257{
258 struct cn_msg *msg;
259 struct proc_event *ev;
260 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
261
262 if (atomic_read(&proc_event_num_listeners) < 1)
263 return;
264
265 msg = buffer_to_cn_msg(buffer);
266 ev = (struct proc_event *)msg->data;
267 memset(&ev->event_data, 0, sizeof(ev->event_data));
268 ev->timestamp_ns = ktime_get_ns();
269 ev->what = PROC_EVENT_COREDUMP;
270 ev->event_data.coredump.process_pid = task->pid;
271 ev->event_data.coredump.process_tgid = task->tgid;
272 ev->event_data.coredump.parent_pid = task->real_parent->pid;
273 ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
274
275 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
276 msg->ack = 0;
277 msg->len = sizeof(*ev);
278 msg->flags = 0;
279 send_msg(msg);
280}
281
282void proc_exit_connector(struct task_struct *task)
283{
284 struct cn_msg *msg;
285 struct proc_event *ev;
286 __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
287
288 if (atomic_read(&proc_event_num_listeners) < 1)
289 return;
290
291 msg = buffer_to_cn_msg(buffer);
292 ev = (struct proc_event *)msg->data;
293 memset(&ev->event_data, 0, sizeof(ev->event_data));
294 ev->timestamp_ns = ktime_get_ns();
295 ev->what = PROC_EVENT_EXIT;
296 ev->event_data.exit.process_pid = task->pid;
297 ev->event_data.exit.process_tgid = task->tgid;
298 ev->event_data.exit.exit_code = task->exit_code;
299 ev->event_data.exit.exit_signal = task->exit_signal;
300 ev->event_data.exit.parent_pid = task->real_parent->pid;
301 ev->event_data.exit.parent_tgid = task->real_parent->tgid;
302
303 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
304 msg->ack = 0;
305 msg->len = sizeof(*ev);
306 msg->flags = 0;
307 send_msg(msg);
308}
309
310
311
312
313
314
315
316
317
/*
 * cn_proc_ack - reply to a PROC_CN_MCAST_* control request.
 * @err:      0 on success, or a positive code (EPERM/EINVAL from
 *            cn_proc_mcast_ctl()) delivered in ev->event_data.ack.err.
 * @rcvd_seq: sequence number of the request being acknowledged.
 * @rcvd_ack: ack value of the request; the reply carries rcvd_ack + 1.
 *
 * NOTE(review): msg->seq and ev->cpu set below are subsequently
 * overwritten by send_msg() (which stamps its own per-CPU seq and
 * smp_processor_id()) — confirm whether echoing rcvd_seq is intended.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	/* No listeners registered: nothing to acknowledge to. */
	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	msg->seq = rcvd_seq;
	ev->timestamp_ns = ktime_get_ns();
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;	/* marks this message as an ack, not an event */
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	msg->flags = 0;
	send_msg(msg);
}
341
342
343
344
345
346static void cn_proc_mcast_ctl(struct cn_msg *msg,
347 struct netlink_skb_parms *nsp)
348{
349 enum proc_cn_mcast_op *mc_op = NULL;
350 int err = 0;
351
352 if (msg->len != sizeof(*mc_op))
353 return;
354
355
356
357
358
359
360 if ((current_user_ns() != &init_user_ns) ||
361 (task_active_pid_ns(current) != &init_pid_ns))
362 return;
363
364
365 if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
366 err = EPERM;
367 goto out;
368 }
369
370 mc_op = (enum proc_cn_mcast_op *)msg->data;
371 switch (*mc_op) {
372 case PROC_CN_MCAST_LISTEN:
373 atomic_inc(&proc_event_num_listeners);
374 break;
375 case PROC_CN_MCAST_IGNORE:
376 atomic_dec(&proc_event_num_listeners);
377 break;
378 default:
379 err = EINVAL;
380 break;
381 }
382
383out:
384 cn_proc_ack(err, msg->seq, msg->ack);
385}
386
387
388
389
390
391
392static int __init cn_proc_init(void)
393{
394 int err = cn_add_callback(&cn_proc_event_id,
395 "cn_proc",
396 &cn_proc_mcast_ctl);
397 if (err) {
398 pr_warn("cn_proc failed to register\n");
399 return err;
400 }
401 return 0;
402}
403device_initcall(cn_proc_init);
404