/* Basic authentication token and access key management */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 1000000;   /* root's key count quota */
unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200;            /* general key count quota */
unsigned int key_quota_maxbytes = 20000;         /* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif

/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 */
struct key_user *key_user_lookup(kuid_t uid)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
        struct rb_node **p;

try_again:
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid_lt(uid, user->uid))
                        p = &(*p)->rb_left;
                else if (uid_gt(uid, user->uid))
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have slept, during which time another
                 * thread may have added a matching record - retry the search */
                goto try_again;
        }

        /* if we get here, the record still hadn't appeared on the second pass
         * - so we use the candidate record */
        atomic_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        mutex_init(&candidate->cons_lock);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
found:
        atomic_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
out:
        return user;
}

/*
 * Dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);

                kfree(user);
        }
}
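
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * caller pins a user's quota record with key_user_lookup() and releases it
 * with key_user_put().  The UID value is assumed for the example.
 *
 *      struct key_user *user;
 *
 *      user = key_user_lookup(make_kuid(&init_user_ns, 1000));
 *      if (!user)
 *              return -ENOMEM;
 *      ...inspect or charge user->qnkeys and user->qnbytes under user->lock...
 *      key_user_put(user);
 */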

/*
 * Allocate a serial number for a key.  These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        /* propose a random serial number and look for a hole for it in the
         * serial number tree */
        do {
                get_random_bytes(&key->serial, sizeof(key->serial));

                key->serial >>= 1; /* negative numbers are not permitted */
        } while (key->serial < 3);

        spin_lock(&key_serial_lock);

attempt_insertion:
        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);
        return;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial++;
                if (key->serial < 3) {
                        key->serial = 3;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto attempt_insertion;
        }
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 * @restrict_link: Optional link restriction for the new key.
 *
 * Allocate a key of the specified type with the attributes given.  The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
 *
 * The user's key count quota is updated to reflect the creation of the key
 * and the user's key data quota has the default for the key type reserved.
 * The instantiation function should amend this as necessary.
 *
 * The LSM security modules can prevent a key being created, in which case
 * an error is returned.
 *
 * Returns a pointer to the new key if successful and an error code otherwise.
 *
 * Note that the caller needs to ensure the key type isn't unregistered whilst
 * this function is running.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      kuid_t uid, kgid_t gid, const struct cred *cred,
                      key_perm_t perm, unsigned long flags,
                      int (*restrict_link)(struct key *,
                                           const struct key_type *,
                                           const union key_payload *))
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        if (type->vet_description) {
                ret = type->vet_description(desc);
                if (ret < 0) {
                        key = ERR_PTR(ret);
                        goto error;
                }
        }

        desclen = strlen(desc);
        quotalen = desclen + 1 + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxkeys : key_quota_maxkeys;
                unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 >= maxkeys ||
                            user->qnbytes + quotalen >= maxbytes ||
                            user->qnbytes + quotalen < user->qnbytes)
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;

        key->index_key.desc_len = desclen;
        key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
        if (!key->index_key.description)
                goto no_memory_3;

        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
        lockdep_set_class(&key->sem, &type->lock_class);
        key->index_key.type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->restrict_link = restrict_link;

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;
        if (flags & KEY_ALLOC_BUILT_IN)
                key->flags |= 1 << KEY_FLAG_BUILTIN;

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, cred, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;
}
EXPORT_SYMBOL(key_alloc);
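
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * caller allocates an uninstantiated user-type key outside the quota; the
 * description and permission mask are assumptions for the example.
 *
 *      struct key *key;
 *
 *      key = key_alloc(&key_type_user, "example:desc",
 *                      GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
 *                      (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
 *                      KEY_ALLOC_NOT_IN_QUOTA, NULL);
 *      if (IS_ERR(key))
 *              return PTR_ERR(key);
 *
 * The key must then be instantiated (see key_instantiate_and_link() below)
 * and is released with key_put() when no longer needed.
 */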

/**
 * key_payload_reserve - Adjust data quota reservation for the key's payload
 * @key: The key to make the reservation for.
 * @datalen: The amount of data payload the caller now wants.
 *
 * Adjust the amount of the owning user's key data quota that a key reserves.
 * If the amount is increased, then -EDQUOT may be returned if there isn't
 * enough free quota available.
 *
 * If successful, 0 is returned.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int)datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
                        key_quota_root_maxbytes : key_quota_maxbytes;

                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    (key->user->qnbytes + delta >= maxbytes ||
                     key->user->qnbytes + delta < key->user->qnbytes)) {
                        ret = -EDQUOT;
                } else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }
                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
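
/*
 * Illustrative usage sketch (not from the original file): a hypothetical key
 * type ->update op reserves quota for the new payload size before swapping
 * the data in; "prep" is assumed to be the preparsed payload passed to it.
 *
 *      ret = key_payload_reserve(key, prep->datalen);
 *      if (ret < 0)
 *              return ret;
 *      ...replace the key's payload with the new data...
 */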

/*
 * Instantiate a key and link it into the target keyring atomically.  Must be
 * called with the target keyring's semaphore writelocked.  The target key's
 * semaphore need not be locked as instantiation is serialised by
 * key_construction_mutex.
 */
static int __key_instantiate_and_link(struct key *key,
                                      struct key_preparsed_payload *prep,
                                      struct key *keyring,
                                      struct key *authkey,
                                      struct assoc_array_edit **_edit)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* instantiate the key */
                ret = key->type->instantiate(key, prep);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring) {
                                if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
                                        set_bit(KEY_FLAG_KEEP, &key->flags);

                                __key_link(key, _edit);
                        }

                        /* disable the authorisation key */
                        if (authkey)
                                key_revoke(authkey);

                        if (prep->expiry != TIME_T_MAX) {
                                key->expiry = prep->expiry;
                                key_schedule_gc(prep->expiry + key_gc_delay);
                        }
                }
        }

        mutex_unlock(&key_construction_mutex);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the key.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided
 * data and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *authkey)
{
        struct key_preparsed_payload prep;
        struct assoc_array_edit *edit;
        int ret;

        memset(&prep, 0, sizeof(prep));
        prep.data = data;
        prep.datalen = datalen;
        prep.quotalen = key->type->def_datalen;
        prep.expiry = TIME_T_MAX;
        if (key->type->preparse) {
                ret = key->type->preparse(&prep);
                if (ret < 0)
                        goto error;
        }

        if (keyring) {
                if (keyring->restrict_link) {
                        ret = keyring->restrict_link(keyring, key->type,
                                                     &prep.payload);
                        if (ret < 0)
                                goto error;
                }
                ret = __key_link_begin(keyring, &key->index_key, &edit);
                if (ret < 0)
                        goto error;
        }

        ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);

        if (keyring)
                __key_link_end(keyring, &key->index_key, edit);

error:
        if (key->type->preparse)
                key->type->free_preparse(&prep);
        return ret;
}

EXPORT_SYMBOL(key_instantiate_and_link);
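
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * upcall handler instantiates a freshly allocated key from a data blob and
 * links it into a destination keyring; "payload", "plen", "dest_keyring" and
 * "authkey" are assumed to come from the caller.
 *
 *      ret = key_instantiate_and_link(key, payload, plen, dest_keyring,
 *                                     authkey);
 *      if (ret < 0)
 *              key_put(key);
 */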

/**
 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key.
 * @error: The error to return when the key is hit.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Negatively instantiate a key that's in the uninstantiated state and, if
 * successful, set its timeout and stored error and link it in to the
 * destination keyring if one is supplied.  The key and any links to the key
 * will be automatically garbage collected after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by
 * causing them to return the stored error code until the negative key
 * expires.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up.  If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_reject_and_link(struct key *key,
                        unsigned timeout,
                        unsigned error,
                        struct key *keyring,
                        struct key *authkey)
{
        struct assoc_array_edit *edit;
        struct timespec now;
        int ret, awaken, link_ret = 0;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring) {
                if (keyring->restrict_link)
                        return -EPERM;

                link_ret = __key_link_begin(keyring, &key->index_key, &edit);
        }

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                key->reject_error = -error;
                smp_wmb();
                set_bit(KEY_FLAG_NEGATIVE, &key->flags);
                set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;
                key_schedule_gc(key->expiry + key_gc_delay);

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring && link_ret == 0)
                        __key_link(key, &edit);

                /* disable the authorisation key */
                if (authkey)
                        key_revoke(authkey);
        }

        mutex_unlock(&key_construction_mutex);

        if (keyring && link_ret == 0)
                __key_link_end(keyring, &key->index_key, edit);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);
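
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * upcall failure path negatively instantiates the key so that request_key()
 * callers see -ENOKEY for the next 60 seconds; the timeout, keyring and
 * authorisation key are assumptions for the example.
 *
 *      ret = key_reject_and_link(key, 60, ENOKEY, dest_keyring, authkey);
 */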

/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (atomic_dec_and_test(&key->usage))
                        schedule_work(&key_gc_work);
        }
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

found:
        /* pretend it doesn't exist if it is awaiting deletion */
        if (atomic_read(&key->usage) == 0)
                goto not_found;

        /* this races with key_put(), but that doesn't matter since key_put()
         * doesn't actually change the key
         */
        __key_get(key);

error:
        spin_unlock(&key_serial_lock);
        return key;
}
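
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * in-kernel caller resolves a serial number to a key and drops the reference
 * taken by key_lookup() when done.
 *
 *      struct key *key = key_lookup(id);
 *
 *      if (IS_ERR(key))
 *              return PTR_ERR(key);
 *      ...use the key...
 *      key_put(key);
 */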

/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if we found the key type.  If we didn't
 * find it, we return with an error and the sem unlocked.
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
        return ktype;
}

/*
 * Set the expiry time on a key from a timeout in seconds; a zero timeout
 * clears any existing expiry.
 */
void key_set_timeout(struct key *key, unsigned timeout)
{
        struct timespec now;
        time_t expiry = 0;

        /* make the changes with the locks held to prevent races */
        down_write(&key->sem);

        if (timeout > 0) {
                now = current_kernel_time();
                expiry = now.tv_sec + timeout;
        }

        key->expiry = expiry;
        key_schedule_gc(key->expiry + key_gc_delay);

        up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);
}
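
/*
 * Illustrative usage sketch (not from the original file): the lookup/put pair
 * pins a key type against unregistration for the duration of its use; the
 * "user" type name is just an example.
 *
 *      struct key_type *ktype = key_type_lookup("user");
 *
 *      if (IS_ERR(ktype))
 *              return PTR_ERR(ktype);
 *      ...use ktype while key_types_sem is held for reading...
 *      key_type_put(ktype);
 */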

/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     struct key_preparsed_payload *prep)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_NEED_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, prep);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;
}

/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create
 * a link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               key_perm_t perm,
                               unsigned long flags)
{
        struct keyring_index_key index_key = {
                .description = description,
        };
        struct key_preparsed_payload prep;
        struct assoc_array_edit *edit;
        const struct cred *cred = current_cred();
        struct key *keyring, *key = NULL;
        key_ref_t key_ref;
        int ret;
        int (*restrict_link)(struct key *,
                             const struct key_type *,
                             const union key_payload *) = NULL;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        index_key.type = key_type_lookup(type);
        if (IS_ERR(index_key.type)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!index_key.type->instantiate ||
            (!index_key.description && !index_key.type->preparse))
                goto error_put_type;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        key_ref = ERR_PTR(-EPERM);
        if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
                restrict_link = keyring->restrict_link;

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_put_type;

        memset(&prep, 0, sizeof(prep));
        prep.data = payload;
        prep.datalen = plen;
        prep.quotalen = index_key.type->def_datalen;
        prep.expiry = TIME_T_MAX;
        if (index_key.type->preparse) {
                ret = index_key.type->preparse(&prep);
                if (ret < 0) {
                        key_ref = ERR_PTR(ret);
                        goto error_free_prep;
                }
                if (!index_key.description)
                        index_key.description = prep.description;
                key_ref = ERR_PTR(-EINVAL);
                if (!index_key.description)
                        goto error_free_prep;
        }
        index_key.desc_len = strlen(index_key.description);

        if (restrict_link) {
                ret = restrict_link(keyring, index_key.type, &prep.payload);
                if (ret < 0) {
                        key_ref = ERR_PTR(ret);
                        goto error_free_prep;
                }
        }

        ret = __key_link_begin(keyring, &index_key, &edit);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_free_prep;
        }

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_NEED_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_link_end;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (index_key.type->update) {
                key_ref = find_key_to_update(keyring_ref, &index_key);
                if (key_ref)
                        goto found_matching_key;
        }

        /* if the client doesn't provide, decide on the permissions we want */
        if (perm == KEY_PERM_UNDEF) {
                perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
                perm |= KEY_USR_VIEW;

                if (index_key.type->read)
                        perm |= KEY_POS_READ;

                if (index_key.type == &key_type_keyring ||
                    index_key.type->update)
                        perm |= KEY_POS_WRITE;
        }

        /* allocate a new key */
        key = key_alloc(index_key.type, index_key.description,
                        cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
        if (IS_ERR(key)) {
                key_ref = ERR_CAST(key);
                goto error_link_end;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_link_end;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_link_end:
        __key_link_end(keyring, &index_key, edit);
error_free_prep:
        if (index_key.type->preparse)
                index_key.type->free_preparse(&prep);
error_put_type:
        key_type_put(index_key.type);
error:
        return key_ref;

 found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        __key_link_end(keyring, &index_key, edit);

        key_ref = __key_update(key_ref, &prep);
        goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
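
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * caller adds or refreshes a user-type key in a keyring it already holds a
 * reference to; "keyring", "payload" and "plen" are assumed.
 *
 *      key_ref_t kref;
 *
 *      kref = key_create_or_update(make_key_ref(keyring, 1), "user",
 *                                  "example:desc", payload, plen,
 *                                  KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
 *      if (IS_ERR(kref))
 *              return PTR_ERR(kref);
 *      key_ref_put(kref);
 */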

/**
 * key_update - Update a key's contents.
 * @key_ref: The pointer (plus possession flag) to the key.
 * @payload: The data to be used to update the key.
 * @plen: The length of @payload.
 *
 * Attempt to update the contents of a key with the given payload data.  The
 * caller must be granted Write permission on the key.  Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating.  The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key_preparsed_payload prep;
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_NEED_WRITE);
        if (ret < 0)
                return ret;

        /* attempt to update it if supported - bail out before prep is touched
         * so that the uninitialised payload is never freed */
        if (!key->type->update)
                return -EOPNOTSUPP;

        memset(&prep, 0, sizeof(prep));
        prep.data = payload;
        prep.datalen = plen;
        prep.quotalen = key->type->def_datalen;
        prep.expiry = TIME_T_MAX;
        if (key->type->preparse) {
                ret = key->type->preparse(&prep);
                if (ret < 0)
                        goto error;
        }

        down_write(&key->sem);

        ret = key->type->update(key, &prep);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

error:
        if (key->type->preparse)
                key->type->free_preparse(&prep);
        return ret;
}
EXPORT_SYMBOL(key_update);
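
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * caller holding a writable key reference replaces the key's payload in
 * place; "new_payload" and "new_plen" are assumed.
 *
 *      ret = key_update(key_ref, new_payload, new_plen);
 *      if (ret < 0)
 *              return ret;
 */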

/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources.  The
 * key and all its links will then be automatically garbage collected after
 * the configured delay.
 */
void key_revoke(struct key *key)
{
        struct timespec now;
        time_t time;

        key_check(key);

        /* make sure no one's trying to change or use the key when we mark it
         * - we tell lockdep that we might nest because we might be revoking an
         *   authorisation key whilst holding the sem on a key we've just
         *   instantiated
         */
        down_write_nested(&key->sem, 1);
        if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
            key->type->revoke)
                key->type->revoke(key);

        /* set the death time to no more than the expiry time */
        now = current_kernel_time();
        time = now.tv_sec;
        if (key->revoked_at == 0 || key->revoked_at > time) {
                key->revoked_at = time;
                key_schedule_gc(key->revoked_at + key_gc_delay);
        }

        up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * key_invalidate - Invalidate a key.
 * @key: The key to be invalidated.
 *
 * Mark a key as being invalidated and have it cleaned up immediately.  The
 * key is ignored by all searches and other operations from this point.
 */
void key_invalidate(struct key *key)
{
        kenter("%d", key_serial(key));

        key_check(key);

        if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
                down_write_nested(&key->sem, 1);
                if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
                        key_schedule_gc_links();
                up_write(&key->sem);
        }
}
EXPORT_SYMBOL(key_invalidate);

/**
 * generic_key_instantiate - Simple instantiation of a key from preparsed data
 * @key: The key to be instantiated
 * @prep: The preparsed data to load.
 *
 * Instantiate a key from preparsed data.  We assume we can just copy the data
 * in directly and clear the old pointers.
 *
 * This can be pointed to directly by the key type instantiate op pointer.
 */
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
        int ret;

        pr_devel("==>%s()\n", __func__);

        ret = key_payload_reserve(key, prep->quotalen);
        if (ret == 0) {
                rcu_assign_keypointer(key, prep->payload.data[0]);
                key->payload.data[1] = prep->payload.data[1];
                key->payload.data[2] = prep->payload.data[2];
                key->payload.data[3] = prep->payload.data[3];
                prep->payload.data[0] = NULL;
                prep->payload.data[1] = NULL;
                prep->payload.data[2] = NULL;
                prep->payload.data[3] = NULL;
        }
        pr_devel("<==%s() = %d\n", __func__, ret);
        return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);
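
/*
 * Illustrative usage sketch (not from the original file): a hypothetical key
 * type whose ->preparse op fills in prep->payload and prep->quotalen can
 * point its instantiate op straight at generic_key_instantiate(); the
 * example_* operations are assumptions.
 *
 *      static struct key_type key_type_example = {
 *              .name           = "example",
 *              .preparse       = example_preparse,
 *              .free_preparse  = example_free_preparse,
 *              .instantiate    = generic_key_instantiate,
 *              .destroy        = example_destroy,
 *      };
 */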

/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);

        pr_notice("Key type %s registered\n", ktype->name);
        ret = 0;

out:
        up_write(&key_types_sem);
        return ret;
}
EXPORT_SYMBOL(register_key_type);

/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed.
 */
void unregister_key_type(struct key_type *ktype)
{
        down_write(&key_types_sem);
        list_del_init(&ktype->link);
        downgrade_write(&key_types_sem);
        key_gc_keytype(ktype);
        pr_notice("Key type %s unregistered\n", ktype->name);
        up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
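
/*
 * Illustrative usage sketch (not from the original file): a hypothetical
 * module pairs registration and unregistration of a key type around its
 * init/exit functions; key_type_example is assumed to be defined as in the
 * sketch above.
 *
 *      static int __init example_init(void)
 *      {
 *              return register_key_type(&key_type_example);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              unregister_key_type(&key_type_example);
 *      }
 */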

/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                                    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);
        list_add_tail(&key_type_logon.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);
}