1
2
3
4
5
6
7
8
9
10
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/poison.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/security.h>
18#include <linux/workqueue.h>
19#include <linux/random.h>
20#include <linux/err.h>
21#include <linux/user_namespace.h>
22#include "internal.h"
23
/* slab cache from which struct key instances are allocated (see key_init()) */
static struct kmem_cache *key_jar;

/* all in-use keys, indexed by serial number */
struct rb_root key_serial_tree;
DEFINE_SPINLOCK(key_serial_lock);

/* per-user key quota records, indexed by (uid, user_ns) */
struct rb_root key_user_tree;
DEFINE_SPINLOCK(key_user_lock);

/* default quota limits; root has its own pair
 * (presumably tunable via sysctl elsewhere — not visible here) */
unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

/* list of registered key types, guarded by key_types_sem */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* deferred destruction of zero-usage keys, scheduled by key_put() */
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* serialises key instantiation and negative instantiation */
DEFINE_MUTEX(key_construction_mutex);

/* stand-in type assigned to keys whose real type has been unregistered
 * (see unregister_key_type()) */
static struct key_type key_type_dead = {
	.name = "dead",
};
49
#ifdef KEY_DEBUGGING
/*
 * Report a key whose magic number doesn't match KEY_DEBUG_MAGIC and die.
 * Presumably reached via the key_check() macro — confirm in internal.h.
 */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
58
59
60
61
62
/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.  Returns NULL on allocation failure.
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a record with a matching UID and namespace */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one - can't hold the spinlock over a GFP_KERNEL alloc */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have slept, so repeat the search lest
		 * someone else added the record whilst we were away */
		goto try_again;
	}

	/* the record still wasn't there on the second pass, so insert the
	 * candidate we prepared */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);	/* discard the unused candidate, if any */
out:
	return user;
}
133
134
135
136
/*
 * Dispose of a reference to a user's key quota record.  When the usage count
 * reaches zero the record is unlinked from the tree (under key_user_lock) and
 * freed.
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);
		put_user_ns(user->user_ns);

		kfree(user);
	}
}
147
148
149
150
151
/*
 * Allocate a serial number for a key.  These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);	/* serials < 3 are reserved */

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* wrapped around - restart from the lowest valid
			 * serial */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239struct key *key_alloc(struct key_type *type, const char *desc,
240 uid_t uid, gid_t gid, const struct cred *cred,
241 key_perm_t perm, unsigned long flags)
242{
243 struct key_user *user = NULL;
244 struct key *key;
245 size_t desclen, quotalen;
246 int ret;
247
248 key = ERR_PTR(-EINVAL);
249 if (!desc || !*desc)
250 goto error;
251
252 if (type->vet_description) {
253 ret = type->vet_description(desc);
254 if (ret < 0) {
255 key = ERR_PTR(ret);
256 goto error;
257 }
258 }
259
260 desclen = strlen(desc) + 1;
261 quotalen = desclen + type->def_datalen;
262
263
264 user = key_user_lookup(uid, cred->user->user_ns);
265 if (!user)
266 goto no_memory_1;
267
268
269
270 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
271 unsigned maxkeys = (uid == 0) ?
272 key_quota_root_maxkeys : key_quota_maxkeys;
273 unsigned maxbytes = (uid == 0) ?
274 key_quota_root_maxbytes : key_quota_maxbytes;
275
276 spin_lock(&user->lock);
277 if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
278 if (user->qnkeys + 1 >= maxkeys ||
279 user->qnbytes + quotalen >= maxbytes ||
280 user->qnbytes + quotalen < user->qnbytes)
281 goto no_quota;
282 }
283
284 user->qnkeys++;
285 user->qnbytes += quotalen;
286 spin_unlock(&user->lock);
287 }
288
289
290 key = kmem_cache_alloc(key_jar, GFP_KERNEL);
291 if (!key)
292 goto no_memory_2;
293
294 if (desc) {
295 key->description = kmemdup(desc, desclen, GFP_KERNEL);
296 if (!key->description)
297 goto no_memory_3;
298 }
299
300 atomic_set(&key->usage, 1);
301 init_rwsem(&key->sem);
302 key->type = type;
303 key->user = user;
304 key->quotalen = quotalen;
305 key->datalen = type->def_datalen;
306 key->uid = uid;
307 key->gid = gid;
308 key->perm = perm;
309 key->flags = 0;
310 key->expiry = 0;
311 key->payload.data = NULL;
312 key->security = NULL;
313
314 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
315 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
316
317 memset(&key->type_data, 0, sizeof(key->type_data));
318
319#ifdef KEY_DEBUGGING
320 key->magic = KEY_DEBUG_MAGIC;
321#endif
322
323
324 ret = security_key_alloc(key, cred, flags);
325 if (ret < 0)
326 goto security_error;
327
328
329 atomic_inc(&user->nkeys);
330 key_alloc_serial(key);
331
332error:
333 return key;
334
335security_error:
336 kfree(key->description);
337 kmem_cache_free(key_jar, key);
338 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
339 spin_lock(&user->lock);
340 user->qnkeys--;
341 user->qnbytes -= quotalen;
342 spin_unlock(&user->lock);
343 }
344 key_user_put(user);
345 key = ERR_PTR(ret);
346 goto error;
347
348no_memory_3:
349 kmem_cache_free(key_jar, key);
350no_memory_2:
351 if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
352 spin_lock(&user->lock);
353 user->qnkeys--;
354 user->qnbytes -= quotalen;
355 spin_unlock(&user->lock);
356 }
357 key_user_put(user);
358no_memory_1:
359 key = ERR_PTR(-ENOMEM);
360 goto error;
361
362no_quota:
363 spin_unlock(&user->lock);
364 key_user_put(user);
365 key = ERR_PTR(-EDQUOT);
366 goto error;
367}
368EXPORT_SYMBOL(key_alloc);
369
370
371
372
373
374
375
376
377
378
379
380
381int key_payload_reserve(struct key *key, size_t datalen)
382{
383 int delta = (int)datalen - key->datalen;
384 int ret = 0;
385
386 key_check(key);
387
388
389 if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
390 unsigned maxbytes = (key->user->uid == 0) ?
391 key_quota_root_maxbytes : key_quota_maxbytes;
392
393 spin_lock(&key->user->lock);
394
395 if (delta > 0 &&
396 (key->user->qnbytes + delta >= maxbytes ||
397 key->user->qnbytes + delta < key->user->qnbytes)) {
398 ret = -EDQUOT;
399 }
400 else {
401 key->user->qnbytes += delta;
402 key->quotalen += delta;
403 }
404 spin_unlock(&key->user->lock);
405 }
406
407
408 if (ret == 0)
409 key->datalen = datalen;
410
411 return ret;
412}
413EXPORT_SYMBOL(key_payload_reserve);
414
415
416
417
418
419
420
/*
 * Instantiate a key and link it into the target keyring atomically.
 * Instantiation is serialised by key_construction_mutex; callers that supply
 * a keyring hold the link lock via __key_link_begin() (see
 * key_instantiate_and_link()).  Returns -EBUSY if the key was already
 * instantiated.
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey,
				      unsigned long *_prealloc)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				__key_link(keyring, key, _prealloc);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486int key_instantiate_and_link(struct key *key,
487 const void *data,
488 size_t datalen,
489 struct key *keyring,
490 struct key *authkey)
491{
492 unsigned long prealloc;
493 int ret;
494
495 if (keyring) {
496 ret = __key_link_begin(keyring, key->type, key->description,
497 &prealloc);
498 if (ret < 0)
499 return ret;
500 }
501
502 ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
503 &prealloc);
504
505 if (keyring)
506 __key_link_end(keyring, key->type, prealloc);
507
508 return ret;
509}
510
511EXPORT_SYMBOL(key_instantiate_and_link);
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
/**
 * key_reject_and_link - Negatively instantiate a key and optionally link it.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key, in seconds.
 * @error: The error to return when the key is hit (stored negated).
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation (or NULL).
 *
 * Negatively instantiate a key that's in the uninstantiated state: set the
 * KEY_FLAG_NEGATIVE flag, record the error, set an expiry @timeout seconds
 * from now and schedule garbage collection for after it.
 *
 * Returns 0 on success (in which case the authorisation key, if any, is
 * revoked and waiters are woken), -EBUSY if the key was already
 * instantiated, or the error from __key_link_begin() if linking failed.
 */
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	unsigned long prealloc;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	/* reserve a link slot before taking the construction mutex */
	if (keyring)
		link_ret = __key_link_begin(keyring, key->type,
					    key->description, &prealloc);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		key->type_data.reject_error = -error;
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring, but only if the
		 * link reservation succeeded */
		if (keyring && link_ret == 0)
			__key_link(keyring, key, &prealloc);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		__key_link_end(keyring, key->type, prealloc);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	/* a link failure is only reported if the instantiation succeeded */
	return ret == 0 ? link_ret : ret;
}
592EXPORT_SYMBOL(key_reject_and_link);
593
594
595
596
597
598
599
600
/*
 * Garbage collect an unused key (work item handler for key_cleanup_task,
 * scheduled by key_put() when a key's usage count falls to zero).  Scans the
 * serial tree for dead keys, destroying them one at a time and rescanning
 * until none remain.
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once it's removed from the tree, no one else
	 * can find it, so we can drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;	/* poison the magic for UAF detection */
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;
}
658
659
660
661
662
663
664
665
666
667void key_put(struct key *key)
668{
669 if (key) {
670 key_check(key);
671
672 if (atomic_dec_and_test(&key->usage))
673 schedule_work(&key_cleanup_task);
674 }
675}
676EXPORT_SYMBOL(key_put);
677
678
679
680
/*
 * Find a key by its serial number.  Returns the key with its usage count
 * incremented, or ERR_PTR(-ENOKEY) if no live key has that serial.
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend the key doesn't exist if it's awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually free the key - it only schedules the cleanup task,
	 * which re-checks the usage count under key_serial_lock */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;
}
719
720
721
722
723
724
725
/*
 * Find and lock the specified key type against removal.  On success the key
 * types semaphore is left read-locked - the caller MUST call key_type_put()
 * to release it.  Returns ERR_PTR(-ENOKEY), with the semaphore released, if
 * the named type isn't registered.
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	/* note: returns with key_types_sem still held on success */
	return ktype;
}
745
746
747
748
/*
 * Release the read lock on the key types semaphore taken by a successful
 * key_type_lookup(), permitting the type to be unregistered again.
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}
753
754
755
756
757
758
759
/*
 * Attempt to update an existing key.  The key reference is consumed on
 * failure (key_put() is called and an ERR_PTR returned); on success the
 * same reference is passed back to the caller.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key, or KEY_PERM_UNDEF to have one
 *	concocted from the type's capabilities.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same type and description
 * and, if one is found and the type supports update, update it; otherwise
 * create and instantiate a new key and link it into the keyring.
 *
 * Returns a key reference on success; ERR_PTR(-ENODEV) if the key type isn't
 * registered, ERR_PTR(-EINVAL) if the type can't be matched/instantiated,
 * ERR_PTR(-ENOTDIR) if @keyring_ref isn't a keyring, or a permission /
 * allocation / instantiation error.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	unsigned long prealloc;
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types - on success the key types sem is held until key_type_put() */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	/* the type must be searchable and instantiable */
	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	/* lock the keyring for linking - balanced by __key_link_end() below */
	ret = __key_link_begin(keyring, ktype, description, &prealloc);
	if (ret < 0)
		goto error_2;

	/* if we're going to allocate a new key, we're going to have to modify
	 * the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
					 &prealloc);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

 error_3:
	__key_link_end(keyring, ktype, prealloc);
 error_2:
	key_type_put(ktype);
 error:
	return key_ref;

 found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, ktype, prealloc);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;
}
926EXPORT_SYMBOL(key_create_or_update);
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941int key_update(key_ref_t key_ref, const void *payload, size_t plen)
942{
943 struct key *key = key_ref_to_ptr(key_ref);
944 int ret;
945
946 key_check(key);
947
948
949 ret = key_permission(key_ref, KEY_WRITE);
950 if (ret < 0)
951 goto error;
952
953
954 ret = -EOPNOTSUPP;
955 if (key->type->update) {
956 down_write(&key->sem);
957
958 ret = key->type->update(key, payload, plen);
959 if (ret == 0)
960
961 clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
962
963 up_write(&key->sem);
964 }
965
966 error:
967 return ret;
968}
969EXPORT_SYMBOL(key_update);
970
971
972
973
974
975
976
977
978
979
/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and, the first time, ask the type to free up
 * its resources.  The revocation time is recorded and garbage collection is
 * scheduled for key_gc_delay after it.
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no later than any already-recorded one */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
1007EXPORT_SYMBOL(key_revoke);
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017int register_key_type(struct key_type *ktype)
1018{
1019 struct key_type *p;
1020 int ret;
1021
1022 ret = -EEXIST;
1023 down_write(&key_types_sem);
1024
1025
1026 list_for_each_entry(p, &key_types_list, link) {
1027 if (strcmp(p->name, ktype->name) == 0)
1028 goto out;
1029 }
1030
1031
1032 list_add(&ktype->link, &key_types_list);
1033 ret = 0;
1034
1035out:
1036 up_write(&key_types_sem);
1037 return ret;
1038}
1039EXPORT_SYMBOL(register_key_type);
1040
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type: withdraw it from the types list, mark all keys of
 * this type as dead, wait an RCU grace period for in-flight users to notice,
 * then destroy the payloads of the dead keys and schedule garbage
 * collection.
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			/* poison the payload so stale use is detectable */
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

	key_schedule_gc(0);
}
1095EXPORT_SYMBOL(unregister_key_type);
1096
1097
1098
1099
/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record root's user standard key quota record as the first node in
	 * the (currently empty) tree */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}
1119