#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

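/* Kind of BPF object pinned at a bpffs inode. */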
enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
	BPF_TYPE_LINK,
};

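/* Take or drop a reference on a pinned object according to its type.
 * Programs, maps and links each use their own refcount helpers; maps
 * additionally account a user reference (uref) for the pinned file.
 */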
static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_inc_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_inc(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	case BPF_TYPE_LINK:
		bpf_link_put(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

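/* Probe which kind of BPF object the file descriptor refers to, trying
 * map, program and link in turn.  On success a new reference is held and
 * *type is set accordingly.
 */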
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	raw = bpf_map_get_with_uref(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_MAP;
		return raw;
	}

	raw = bpf_prog_get(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		return raw;
	}

	raw = bpf_link_get_from_fd(ufd);
	if (!IS_ERR(raw)) {
		*type = BPF_TYPE_LINK;
		return raw;
	}

	return ERR_PTR(-EINVAL);
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };

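/* Allocate a new bpffs inode.  Only directories, regular files (pinned
 * objects) and symlinks are supported.
 */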
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(inode, dir, mode);

	return inode;
}

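/* Map an inode back to the type of BPF object pinned at it, based on
 * which inode_operations table it was created with.
 */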
static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;
	else
		return -EACCES;

	return 0;
}

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

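/* Support for reading pinned maps through the seq_file interface: the
 * iterator keeps a private buffer holding the key of the element shown
 * last, which map_get_next_key() advances on each step.
 */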
struct map_iter {
	void *key;
	bool done;
};

static struct map_iter *map_iter(struct seq_file *m)
{
	return m->private;
}

static struct bpf_map *seq_file_to_map(struct seq_file *m)
{
	return file_inode(m->file)->i_private;
}

static void map_iter_free(struct map_iter *iter)
{
	if (iter) {
		kfree(iter->key);
		kfree(iter);
	}
}

static struct map_iter *map_iter_alloc(struct bpf_map *map)
{
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	if (!iter)
		goto error;

	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);
	if (!iter->key)
		goto error;

	return iter;

error:
	map_iter_free(iter);
	return NULL;
}

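/* Advance to the next element.  SEQ_START_TOKEN means we are before the
 * first element, so the lookup starts from a NULL key; once
 * map_get_next_key() reports no further element, mark the iterator done.
 */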
static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;
	void *prev_key;

	(*pos)++;
	if (map_iter(m)->done)
		return NULL;

	if (unlikely(v == SEQ_START_TOKEN))
		prev_key = NULL;
	else
		prev_key = key;

	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;
		return NULL;
	}
	return key;
}

static void *map_seq_start(struct seq_file *m, loff_t *pos)
{
	if (map_iter(m)->done)
		return NULL;

	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;
}

static void map_seq_stop(struct seq_file *m, void *v)
{
}

static int map_seq_show(struct seq_file *m, void *v)
{
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
	} else {
		map->ops->map_seq_show_elem(map, key, m);
	}

	return 0;
}

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,
};

static int bpffs_map_open(struct inode *inode, struct file *file)
{
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;
	struct seq_file *m;
	int err;

	iter = map_iter_alloc(map);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &bpffs_map_seq_ops);
	if (err) {
		map_iter_free(iter);
		return err;
	}

	m = file->private_data;
	m->private = iter;

	return 0;
}

static int bpffs_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));

	return seq_release(inode, file);
}
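/* bpffs map files only implement the basic operations needed to dump a
 * map's contents via read(2); all other access to a pinned map goes
 * through a file descriptor obtained with BPF_OBJ_GET.
 */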
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.read		= seq_read,
	.release	= bpffs_map_release,
};

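/* Pinned objects without a readable representation (programs, links and
 * maps lacking a seq_show implementation) refuse open(2) with -EIO; they
 * can only be retrieved again via BPF_OBJ_GET.
 */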
static int bpffs_obj_open(struct inode *inode, struct file *file)
{
	return -EIO;
}

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,
};

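/* Create the inode backing a pinned object and stash the object pointer
 * in i_private so it can be recovered on lookup and release.
 */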
static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_fop = fops;
	inode->i_private = raw;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,
			     &bpffs_obj_fops);
}

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
{
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);
}

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
{
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     &bpffs_obj_fops);
}

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	/* Names containing a '.' are rejected: dotted entries are reserved
	 * for future bpffs extensions.
	 */
	if (strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

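/* BPF_OBJ_PIN: create a new entry at @pathname referencing the object.
 * The parent directory must itself live on bpffs, and on success the new
 * inode takes over the reference acquired by bpf_fd_probe_obj().
 */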
static int bpf_obj_do_pin(const char __user *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	int ret;

	dentry = user_path_create(AT_FDCWD, pathname, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());

	ret = security_path_mknod(&path, dentry, mode, 0);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	switch (type) {
	case BPF_TYPE_PROG:
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		break;
	case BPF_TYPE_MAP:
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		break;
	case BPF_TYPE_LINK:
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
		break;
	default:
		ret = -EPERM;
	}
out:
	done_path_create(&path, dentry);
	return ret;
}

int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	enum bpf_type type;
	void *raw;
	int ret;

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	ret = bpf_obj_do_pin(pathname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);

	return ret;
}

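/* BPF_OBJ_GET: resolve @pathname, check permissions against the requested
 * open flags, and return the pinned object with a fresh reference taken.
 */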
static void *bpf_obj_do_get(const char __user *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = user_path_at(AT_FDCWD, pathname, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	int f_flags;
	void *raw;
	int ret;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	raw = bpf_obj_do_get(pathname, &type, f_flags);
	if (IS_ERR(raw))
		return PTR_ERR(raw);

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = bpf_link_new_fd(raw);
	else
		return -ENOENT;

	if (ret < 0)
		bpf_any_put(raw, type);
	return ret;
}

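/* In-kernel counterpart of BPF_OBJ_GET for programs: fetch a pinned
 * program by bpffs path and verify it has the expected program type
 * before handing out a reference.
 */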
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	int ret = inode_permission(inode, MAY_READ);
	if (ret)
		return ERR_PTR(ret);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

	bpf_prog_inc(prog);
	return prog;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	struct bpf_prog *prog;
	struct path path;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);
	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
	if (!IS_ERR(prog))
		touch_atime(&path);
	path_put(&path);
	return prog;
}
EXPORT_SYMBOL(bpf_prog_get_type_path);
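/* Show the non-default mount options in /proc/mounts. */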
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

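/* Inode teardown runs after an RCU grace period; it drops the reference
 * on the pinned object and frees symlink targets allocated in
 * bpf_symlink().
 */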
static void bpf_destroy_inode_deferred(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	enum bpf_type type;

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);
}

static void bpf_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.destroy_inode	= bpf_destroy_inode,
};

enum {
	OPT_MODE,
	OPT_ERR,
};

static const match_table_t bpf_mount_tokens = {
	{ OPT_MODE, "mode=%o" },
	{ OPT_ERR, NULL },
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
{
	substring_t args[MAX_OPT_ARGS];
	int option, token;
	char *ptr;

	opts->mode = S_IRWXUGO;

	while ((ptr = strsep(&data, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, bpf_mount_tokens, args);
		switch (token) {
		case OPT_MODE:
			if (match_octal(&args[0], &option))
				return -EINVAL;
			opts->mode = option & S_IALLUGO;
			break;
		default:
			/* Unknown options are ignored rather than failing
			 * the mount.
			 */
			break;
		}
	}

	return 0;
}

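/* Set up the bpffs superblock: a simple in-memory filesystem whose root
 * directory is sticky and gets its access mode from the mode= option.
 */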
static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts opts;
	struct inode *inode;
	int ret;

	ret = bpf_parse_options(data, &opts);
	if (ret)
		return ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	inode->i_mode |= S_ISVTX | opts.mode;

	return 0;
}

static struct dentry *bpf_mount(struct file_system_type *type, int flags,
				const char *dev_name, void *data)
{
	return mount_nodev(type, flags, data, bpf_fill_super);
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.mount		= bpf_mount,
	.kill_sb	= kill_litter_super,
};

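/* Register the filesystem and create the canonical /sys/fs/bpf mount
 * point at boot.
 */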
static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);