1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/rcupdate.h>
27#include <linux/list.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/jhash.h>
31#include <linux/audit.h>
32#include <linux/slab.h>
33#include <net/ip.h>
34#include <net/icmp.h>
35#include <net/tcp.h>
36#include <net/netlabel.h>
37#include <net/cipso_ipv4.h>
38#include <linux/atomic.h>
39#include <linux/bug.h>
40#include <asm/unaligned.h>
41
42
43
44
45
46
/* List of available DOI definitions; protected for writers by
 * cipso_v4_doi_list_lock, readers traverse it under RCU. */
static DEFINE_SPINLOCK(cipso_v4_doi_list_lock);
static LIST_HEAD(cipso_v4_doi_list);
49
50
/* Label mapping cache */
int cipso_v4_cache_enabled = 1;
int cipso_v4_cache_bucketsize = 10;
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS)
#define CIPSO_V4_CACHE_REORDERLIMIT 10
/* One hash bucket of the mapping cache; each bucket has its own lock so
 * lookups in different buckets do not contend. */
struct cipso_v4_map_cache_bkt {
	spinlock_t lock;
	u32 size;			/* current number of entries in @list */
	struct list_head list;		/* entries, roughly most-active first */
};

/* A single cached mapping: the raw CIPSO option bytes (key) -> LSM secattr
 * cache blob (lsm_data).  @activity counts hits and drives list reordering. */
struct cipso_v4_map_cache_entry {
	u32 hash;			/* jhash of @key, avoids most memcmps */
	unsigned char *key;		/* copy of the on-the-wire CIPSO option */
	size_t key_len;

	struct netlbl_lsm_cache *lsm_data;	/* refcounted LSM data */

	u32 activity;			/* hit counter for cache reordering */
	struct list_head list;
};

/* Bucket array, allocated at init time by cipso_v4_cache_init() */
static struct cipso_v4_map_cache_bkt *cipso_v4_cache;
74
75
/* Restricted bitmap (tag #1) flags: optfmt pads the tag for broken (TSIG)
 * peers, strictvalid enables full validation of incoming tags. */
int cipso_v4_rbm_optfmt = 0;
int cipso_v4_rbm_strictvalid = 1;
78
79
80
81
82
83
84
/* Maximum size of the CIPSO IP option, derived from the IPv4 header's
 * 40 byte option space limit */
#define CIPSO_V4_OPT_LEN_MAX 40

/* Length of the base CIPSO option header: type (1 byte), length (1 byte),
 * DOI (4 bytes) */
#define CIPSO_V4_HDR_LEN 6

/* Base length of the restricted bitmap tag (tag #1) */
#define CIPSO_V4_TAG_RBM_BLEN 4

/* Base length of the enumerated tag (tag #2) */
#define CIPSO_V4_TAG_ENUM_BLEN 4

/* Base length of the ranged tag (tag #5) */
#define CIPSO_V4_TAG_RNG_BLEN 4

/* Maximum number of category ranges permitted in the ranged tag (tag #5);
 * bounded by the space left in the option after the header and tag base */
#define CIPSO_V4_TAG_RNG_CAT_MAX 8

/* Base length of the local tag (non-standard tag): type (1 byte),
 * length (1 byte), 32 bit secid (4 bytes).  Only valid on loopback. */
#define CIPSO_V4_TAG_LOC_BLEN 6
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
133{
134 if (entry->lsm_data)
135 netlbl_secattr_cache_free(entry->lsm_data);
136 kfree(entry->key);
137 kfree(entry);
138}
139
140
141
142
143
144
145
146
147
148
/**
 * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache
 * @key: the hash key
 * @key_len: the length of the key in bytes
 *
 * Description:
 * The CIPSO tag hashing function.  Returns a 32 bit hash value.
 *
 */
static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}
153
154
155
156
157
158
159
160
161
162
163
164
165
166
/**
 * cipso_v4_cache_init - Initialize the CIPSO cache
 *
 * Description:
 * Initializes the CIPSO label mapping cache: allocates the bucket array and
 * gives each bucket an empty list and its own spinlock.  Returns zero on
 * success, -ENOMEM if the bucket array cannot be allocated.
 *
 */
static int __init cipso_v4_cache_init(void)
{
	u32 iter;

	cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
				 sizeof(struct cipso_v4_map_cache_bkt),
				 GFP_KERNEL);
	if (!cipso_v4_cache)
		return -ENOMEM;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_init(&cipso_v4_cache[iter].lock);
		cipso_v4_cache[iter].size = 0;
		INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
	}

	return 0;
}
185
186
187
188
189
190
191
192
/**
 * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache
 *
 * Description:
 * Invalidates and frees any entries in the CIPSO cache.  Walks every bucket,
 * freeing each entry under the bucket lock so concurrent lookups stay safe.
 *
 */
void cipso_v4_cache_invalidate(void)
{
	struct cipso_v4_map_cache_entry *entry, *tmp_entry;
	u32 iter;

	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
		spin_lock_bh(&cipso_v4_cache[iter].lock);
		/* _safe variant because entries are deleted while iterating */
		list_for_each_entry_safe(entry,
					 tmp_entry,
					 &cipso_v4_cache[iter].list, list) {
			list_del(&entry->list);
			cipso_v4_cache_entry_free(entry);
		}
		cipso_v4_cache[iter].size = 0;
		spin_unlock_bh(&cipso_v4_cache[iter].lock);
	}
}
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
/**
 * cipso_v4_cache_check - Check the CIPSO cache for a label mapping
 * @key: the buffer to check
 * @key_len: buffer length in bytes
 * @secattr: the security attribute struct to use
 *
 * Description:
 * This function checks the cache to see if a label mapping already exists for
 * the given key.  If there is a match then the cache is adjusted and the
 * @secattr struct is populated with the correct LSM security attributes.  The
 * cache is adjusted in the following manner if the entry is not already the
 * first in the cache bucket: the entry is moved in front of the previous
 * entry if its activity count is CIPSO_V4_CACHE_REORDERLIMIT higher, so hot
 * entries migrate toward the front of the list.  Returns zero on success,
 * negative values on failure.
 *
 */
static int cipso_v4_cache_check(const unsigned char *key,
				u32 key_len,
				struct netlbl_lsm_secattr *secattr)
{
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry;
	struct cipso_v4_map_cache_entry *prev_entry = NULL;
	u32 hash;

	if (!cipso_v4_cache_enabled)
		return -ENOENT;

	hash = cipso_v4_map_cache_hash(key, key_len);
	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
		/* cheap hash/length comparison first, memcmp only on match */
		if (entry->hash == hash &&
		    entry->key_len == key_len &&
		    memcmp(entry->key, key, key_len) == 0) {
			entry->activity += 1;
			refcount_inc(&entry->lsm_data->refcount);
			secattr->cache = entry->lsm_data;
			secattr->flags |= NETLBL_SECATTR_CACHE;
			secattr->type = NETLBL_NLTYPE_CIPSOV4;
			/* already at the head of the bucket, nothing to do */
			if (!prev_entry) {
				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
				return 0;
			}

			/* decay the neighbour and swap ahead of it once our
			 * activity lead exceeds the reorder limit */
			if (prev_entry->activity > 0)
				prev_entry->activity -= 1;
			if (entry->activity > prev_entry->activity &&
			    entry->activity - prev_entry->activity >
			    CIPSO_V4_CACHE_REORDERLIMIT) {
				__list_del(entry->list.prev, entry->list.next);
				__list_add(&entry->list,
					   prev_entry->list.prev,
					   &prev_entry->list);
			}

			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
			return 0;
		}
		prev_entry = entry;
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return -ENOENT;
}
282
283
284
285
286
287
288
289
290
291
292
293
294
295
/**
 * cipso_v4_cache_add - Add an entry to the CIPSO cache
 * @cipso_ptr: pointer to CIPSO IP option (the raw option bytes are the key)
 * @secattr: the packet's security attributes
 *
 * Description:
 * Add a new entry into the CIPSO label mapping cache.  Add the new entry to
 * the head of the bucket's list; if the bucket is full the entry at the tail
 * (the least recently promoted one) is evicted to make room.  Returns zero
 * on success, negative values on failure.  Returns zero (a no-op) when the
 * cache is disabled.
 *
 */
int cipso_v4_cache_add(const unsigned char *cipso_ptr,
		       const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry = NULL;
	struct cipso_v4_map_cache_entry *old_entry = NULL;
	u32 cipso_ptr_len;

	if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
		return 0;

	/* option length byte covers the entire CIPSO option */
	cipso_ptr_len = cipso_ptr[1];

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;
	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
	if (!entry->key) {
		ret_val = -ENOMEM;
		goto cache_add_failure;
	}
	entry->key_len = cipso_ptr_len;
	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
	/* take a reference on the LSM data for the lifetime of the entry */
	refcount_inc(&secattr->cache->refcount);
	entry->lsm_data = secattr->cache;

	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache[bkt].size += 1;
	} else {
		/* bucket full: evict the tail entry, size stays constant */
		old_entry = list_entry(cipso_v4_cache[bkt].list.prev,
				       struct cipso_v4_map_cache_entry, list);
		list_del(&old_entry->list);
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache_entry_free(old_entry);
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return 0;

cache_add_failure:
	if (entry)
		cipso_v4_cache_entry_free(entry);
	return ret_val;
}
344
345
346
347
348
349
350
351
352
353
354
355
356
357
/**
 * cipso_v4_doi_search - Searches for a DOI definition
 * @doi: the DOI to search for
 *
 * Description:
 * Search the DOI definition list for a DOI definition with a DOI value that
 * matches @doi.  The caller is responsible for calling rcu_read_[un]lock()
 * (or holding cipso_v4_doi_list_lock).  Returns a pointer to the DOI
 * definition on success and NULL on failure.  Definitions whose refcount has
 * dropped to zero are treated as already removed.
 */
static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
{
	struct cipso_v4_doi *iter;

	list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
		if (iter->doi == doi && refcount_read(&iter->refcount))
			return iter;
	return NULL;
}
367
368
369
370
371
372
373
374
375
376
377
378
379
380
/**
 * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine
 * @doi_def: the DOI structure
 * @audit_info: NetLabel audit information
 *
 * Description:
 * The caller defines a new DOI for use by the CIPSO engine and calls this
 * function to add it to the list of acceptable domains.  The tag list in
 * @doi_def is validated against the mapping type, the definition's refcount
 * is initialized, and it is inserted into the global list unless a
 * definition with the same DOI already exists.  The add is always audited.
 * Returns zero on success and non-zero on failure.
 *
 */
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
		     struct netlbl_audit *audit_info)
{
	int ret_val = -EINVAL;
	u32 iter;
	u32 doi;
	u32 doi_type;
	struct audit_buffer *audit_buf;

	/* snapshot for the audit record emitted below */
	doi = doi_def->doi;
	doi_type = doi_def->type;

	if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
		goto doi_add_return;
	/* validate each requested tag against the mapping type */
	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			break;
		case CIPSO_V4_TAG_RANGE:
		case CIPSO_V4_TAG_ENUM:
			/* range/enum tags only make sense for pass-through */
			if (doi_def->type != CIPSO_V4_MAP_PASS)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_LOCAL:
			if (doi_def->type != CIPSO_V4_MAP_LOCAL)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_INVALID:
			/* an empty tag list is invalid, a trailing
			 * CIPSO_V4_TAG_INVALID simply terminates it */
			if (iter == 0)
				goto doi_add_return;
			break;
		default:
			goto doi_add_return;
		}
	}

	refcount_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
	if (cipso_v4_doi_search(doi_def->doi)) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
	}
	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
	spin_unlock(&cipso_v4_doi_list_lock);
	ret_val = 0;

doi_add_return:
	/* audit both successful and failed add attempts */
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
	if (audit_buf) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
			type_str = "trans";
			break;
		case CIPSO_V4_MAP_PASS:
			type_str = "pass";
			break;
		case CIPSO_V4_MAP_LOCAL:
			type_str = "local";
			break;
		default:
			type_str = "(unknown)";
		}
		audit_log_format(audit_buf,
				 " cipso_doi=%u cipso_type=%s res=%u",
				 doi, type_str, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
454
455
456
457
458
459
460
461
462
/**
 * cipso_v4_doi_free - Frees a DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * This function frees all of the memory associated with a DOI definition.
 * For translation mappings the level/category tables are released as well;
 * pass-through and local mappings carry no extra tables.
 *
 */
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
	if (!doi_def)
		return;

	if (doi_def->map.std) {
		switch (doi_def->type) {
		case CIPSO_V4_MAP_TRANS:
			kfree(doi_def->map.std->lvl.cipso);
			kfree(doi_def->map.std->lvl.local);
			kfree(doi_def->map.std->cat.cipso);
			kfree(doi_def->map.std->cat.local);
			kfree(doi_def->map.std);
			break;
		}
	}
	kfree(doi_def);
}
481
482
483
484
485
486
487
488
489
490
491
/**
 * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
 * @entry: the entry's RCU field
 *
 * Description:
 * This function is designed to be used as a callback to the call_rcu()
 * function so that the memory allocated to the DOI definition can be released
 * safely once all RCU readers are done with it.
 *
 */
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
	struct cipso_v4_doi *doi_def;

	doi_def = container_of(entry, struct cipso_v4_doi, rcu);
	cipso_v4_doi_free(doi_def);
}
499
500
501
502
503
504
505
506
507
508
509
510
/**
 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes a DOI definition from the CIPSO engine.  The definition is unlinked
 * from the list under the list lock and the list's reference is dropped via
 * cipso_v4_doi_putdef(); actual freeing happens once the last reference goes
 * away.  The removal is always audited.  Returns zero on success and negative
 * values on failure.
 *
 */
int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
{
	int ret_val;
	struct cipso_v4_doi *doi_def;
	struct audit_buffer *audit_buf;

	spin_lock(&cipso_v4_doi_list_lock);
	doi_def = cipso_v4_doi_search(doi);
	if (!doi_def) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -ENOENT;
		goto doi_remove_return;
	}
	list_del_rcu(&doi_def->list);
	spin_unlock(&cipso_v4_doi_list_lock);

	/* drop the list's reference; frees via RCU when it hits zero */
	cipso_v4_doi_putdef(doi_def);
	ret_val = 0;

doi_remove_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
	if (audit_buf) {
		audit_log_format(audit_buf,
				 " cipso_doi=%u res=%u",
				 doi, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
541
542
543
544
545
546
547
548
549
550
551
552
/**
 * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition
 * @doi: the DOI value
 *
 * Description:
 * Searches for a valid DOI definition and if one is found it is returned to
 * the caller with its reference count incremented; the caller must later
 * release it with cipso_v4_doi_putdef().  Returns NULL if the DOI is unknown
 * or is already on its way out (refcount has hit zero).
 *
 */
struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
{
	struct cipso_v4_doi *doi_def;

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (!doi_def)
		goto doi_getdef_return;
	/* racing removal may have dropped the last reference already */
	if (!refcount_inc_not_zero(&doi_def->refcount))
		doi_def = NULL;

doi_getdef_return:
	rcu_read_unlock();
	return doi_def;
}
568
569
570
571
572
573
574
575
576
/**
 * cipso_v4_doi_putdef - Releases a reference for the given DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * Releases a DOI definition reference obtained from cipso_v4_doi_getdef().
 * When the last reference is dropped the mapping cache is invalidated (it may
 * hold entries derived from this DOI) and the definition is freed after an
 * RCU grace period.
 *
 */
void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
{
	if (!doi_def)
		return;

	if (!refcount_dec_and_test(&doi_def->refcount))
		return;

	cipso_v4_cache_invalidate();
	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
}
588
589
590
591
592
593
594
595
596
597
598
599
600
601
/**
 * cipso_v4_doi_walk - Iterate through the DOI definitions
 * @skip_cnt: skip past this number of DOI definitions, updated
 * @callback: callback for each DOI definition
 * @cb_arg: argument for the callback function
 *
 * Description:
 * Iterate over the DOI definition list, skipping the first @skip_cnt entries,
 * and call @callback for each valid entry.  If the callback returns a
 * negative value, the walk stops and that value is returned; @skip_cnt is
 * updated to the number of entries successfully processed so the caller can
 * resume.  Returns -ENOENT if no entry past @skip_cnt was visited.
 *
 */
int cipso_v4_doi_walk(u32 *skip_cnt,
		     int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
		     void *cb_arg)
{
	int ret_val = -ENOENT;
	u32 doi_cnt = 0;
	struct cipso_v4_doi *iter_doi;

	rcu_read_lock();
	list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list)
		if (refcount_read(&iter_doi->refcount) > 0) {
			if (doi_cnt++ < *skip_cnt)
				continue;
			ret_val = callback(iter_doi, cb_arg);
			if (ret_val < 0) {
				/* the failed entry was not processed */
				doi_cnt--;
				goto doi_walk_return;
			}
		}

doi_walk_return:
	rcu_read_unlock();
	*skip_cnt = doi_cnt;
	return ret_val;
}
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
644{
645 switch (doi_def->type) {
646 case CIPSO_V4_MAP_PASS:
647 return 0;
648 case CIPSO_V4_MAP_TRANS:
649 if ((level < doi_def->map.std->lvl.cipso_size) &&
650 (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
651 return 0;
652 break;
653 }
654
655 return -EFAULT;
656}
657
658
659
660
661
662
663
664
665
666
667
668
669
670static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
671 u32 host_lvl,
672 u32 *net_lvl)
673{
674 switch (doi_def->type) {
675 case CIPSO_V4_MAP_PASS:
676 *net_lvl = host_lvl;
677 return 0;
678 case CIPSO_V4_MAP_TRANS:
679 if (host_lvl < doi_def->map.std->lvl.local_size &&
680 doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
681 *net_lvl = doi_def->map.std->lvl.local[host_lvl];
682 return 0;
683 }
684 return -EPERM;
685 }
686
687 return -EINVAL;
688}
689
690
691
692
693
694
695
696
697
698
699
700
701
702static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
703 u32 net_lvl,
704 u32 *host_lvl)
705{
706 struct cipso_v4_std_map_tbl *map_tbl;
707
708 switch (doi_def->type) {
709 case CIPSO_V4_MAP_PASS:
710 *host_lvl = net_lvl;
711 return 0;
712 case CIPSO_V4_MAP_TRANS:
713 map_tbl = doi_def->map.std;
714 if (net_lvl < map_tbl->lvl.cipso_size &&
715 map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) {
716 *host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
717 return 0;
718 }
719 return -EPERM;
720 }
721
722 return -EINVAL;
723}
724
725
726
727
728
729
730
731
732
733
734
735
736
/**
 * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid
 * @doi_def: the DOI definition
 * @bitmap: category bitmap
 * @bitmap_len: bitmap length in bytes
 *
 * Description:
 * Checks the given category bitmap against the given DOI definition and
 * returns a negative value if any of the categories in the bitmap do not have
 * a valid mapping and a zero value if all of the categories are valid.
 * Pass-through mappings accept any bitmap.
 *
 */
static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *bitmap,
				      u32 bitmap_len)
{
	int cat = -1;
	u32 bitmap_len_bits = bitmap_len * 8;
	u32 cipso_cat_size;
	u32 *cipso_array;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		return 0;
	case CIPSO_V4_MAP_TRANS:
		cipso_cat_size = doi_def->map.std->cat.cipso_size;
		cipso_array = doi_def->map.std->cat.cipso;
		/* walk every set bit and verify it has a mapping */
		for (;;) {
			cat = netlbl_bitmap_walk(bitmap,
						 bitmap_len_bits,
						 cat + 1,
						 1);
			if (cat < 0)
				break;
			if (cat >= cipso_cat_size ||
			    cipso_array[cat] >= CIPSO_V4_INV_CAT)
				return -EFAULT;
		}

		/* the walk terminates with -1 on a clean end-of-bitmap */
		if (cat == -1)
			return 0;
		break;
	}

	return -EFAULT;
}
771
772
773
774
775
776
777
778
779
780
781
782
783
784
/**
 * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category bitmap in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO bitmap using the given DOI definition.  Returns the minimum
 * size in bytes of the network bitmap on success, negative values otherwise.
 *
 */
static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int host_spot = -1;
	u32 net_spot = CIPSO_V4_INV_CAT;
	u32 net_spot_max = 0;
	u32 net_clen_bits = net_cat_len * 8;
	u32 host_cat_size = 0;
	u32 *host_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		host_cat_size = doi_def->map.std->cat.local_size;
		host_cat_array = doi_def->map.std->cat.local;
	}

	for (;;) {
		/* walk the host catmap one set category at a time */
		host_spot = netlbl_catmap_walk(secattr->attr.mls.cat,
					       host_spot + 1);
		if (host_spot < 0)
			break;

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			net_spot = host_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (host_spot >= host_cat_size)
				return -EPERM;
			net_spot = host_cat_array[host_spot];
			if (net_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		if (net_spot >= net_clen_bits)
			return -ENOSPC;
		netlbl_bitmap_setbit(net_cat, net_spot, 1);

		/* track the highest bit so the bitmap can be truncated */
		if (net_spot > net_spot_max)
			net_spot_max = net_spot;
	}

	/* return the bitmap length in bytes, rounded up to a full byte */
	if (++net_spot_max % 8)
		return net_spot_max / 8 + 1;
	return net_spot_max / 8;
}
832
833
834
835
836
837
838
839
840
841
842
843
844
845
/**
 * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category bitmap in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO bitmap to the correct local
 * MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	int net_spot = -1;
	u32 host_spot = CIPSO_V4_INV_CAT;
	u32 net_clen_bits = net_cat_len * 8;
	u32 net_cat_size = 0;
	u32 *net_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		net_cat_size = doi_def->map.std->cat.cipso_size;
		net_cat_array = doi_def->map.std->cat.cipso;
	}

	for (;;) {
		net_spot = netlbl_bitmap_walk(net_cat,
					      net_clen_bits,
					      net_spot + 1,
					      1);
		if (net_spot < 0) {
			/* -2 flags a malformed bitmap, -1 a clean end */
			if (net_spot == -2)
				return -EFAULT;
			return 0;
		}

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			host_spot = net_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (net_spot >= net_cat_size)
				return -EPERM;
			host_spot = net_cat_array[net_spot];
			if (host_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
					       host_spot,
					       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	/* not reached; the walk loop always returns */
	return -EINVAL;
}
895
896
897
898
899
900
901
902
903
904
905
906
907
908static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
909 const unsigned char *enumcat,
910 u32 enumcat_len)
911{
912 u16 cat;
913 int cat_prev = -1;
914 u32 iter;
915
916 if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
917 return -EFAULT;
918
919 for (iter = 0; iter < enumcat_len; iter += 2) {
920 cat = get_unaligned_be16(&enumcat[iter]);
921 if (cat <= cat_prev)
922 return -EFAULT;
923 cat_prev = cat;
924 }
925
926 return 0;
927}
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
/**
 * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO category list using the given DOI definition.  Returns the
 * size in bytes of the network category bitmap on success, negative values
 * otherwise.
 *
 */
static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
				      const struct netlbl_lsm_secattr *secattr,
				      unsigned char *net_cat,
				      u32 net_cat_len)
{
	int cat = -1;
	u32 cat_iter = 0;

	for (;;) {
		cat = netlbl_catmap_walk(secattr->attr.mls.cat, cat + 1);
		if (cat < 0)
			break;
		if ((cat_iter + 2) > net_cat_len)
			return -ENOSPC;

		/* NOTE(review): direct __be16 store into an unsigned char
		 * buffer; unlike the parse paths which use
		 * get_unaligned_be16(), this assumes the buffer offset is
		 * suitably aligned -- confirm on strict-alignment arches */
		*((__be16 *)&net_cat[cat_iter]) = htons(cat);
		cat_iter += 2;
	}

	return cat_iter;
}
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
979 const unsigned char *net_cat,
980 u32 net_cat_len,
981 struct netlbl_lsm_secattr *secattr)
982{
983 int ret_val;
984 u32 iter;
985
986 for (iter = 0; iter < net_cat_len; iter += 2) {
987 ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
988 get_unaligned_be16(&net_cat[iter]),
989 GFP_ATOMIC);
990 if (ret_val != 0)
991 return ret_val;
992 }
993
994 return 0;
995}
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
/**
 * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid
 * @doi_def: the DOI definition
 * @rngcat: category list
 * @rngcat_len: length of the category list in bytes
 *
 * Description:
 * Checks the given categories against the given DOI definition and returns a
 * negative value if any of the categories do not have a valid mapping and a
 * zero value if all of the categories are valid.  The ranged tag is only
 * valid for pass-through mappings; each range is a (high, optional low) pair
 * of 16 bit values and ranges must be in descending order without overlap.
 *
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	/* start above the maximum category so the first range always passes */
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		/* a trailing range may omit the low value, meaning zero */
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO category list using the given DOI definition.  The host
 * catmap is walked in ascending (low, high) range pairs which are buffered on
 * the stack and then written out in reverse so the wire format carries the
 * ranges in descending order.  Returns the size in bytes of the network
 * category bitmap on success, negative values otherwise.
 *
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* make sure we don't overflow the 'array[]' variable */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	for (;;) {
		iter = netlbl_catmap_walk(secattr->attr.mls.cat, iter + 1);
		if (iter < 0)
			break;
		/* a low value of zero is implicit and costs no wire bytes */
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		/* find the end of the contiguous run starting at 'iter' */
		iter = netlbl_catmap_walkrng(secattr->attr.mls.cat, iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	/* drain the array backwards: high value first, then the low value
	 * unless it is zero (implicit on the wire) */
	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
/**
 * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO category list to the correct
 * local MLS category bitmap using the given DOI definition.  Each (high,
 * optional low) range pair sets the corresponding run of bits in the host
 * catmap.  Returns zero on success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 net_iter;
	u16 cat_low;
	u16 cat_high;

	for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
		cat_high = get_unaligned_be16(&net_cat[net_iter]);
		/* a trailing range may omit the low value, meaning zero */
		if ((net_iter + 4) <= net_cat_len)
			cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
		else
			cat_low = 0;

		ret_val = netlbl_catmap_setrng(&secattr->attr.mls.cat,
					       cat_low,
					       cat_high,
					       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return 0;
}
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
/**
 * cipso_v4_gentag_hdr - Generate a CIPSO option header
 * @doi_def: the DOI definition
 * @buf: the option buffer
 * @len: the total tag length in bytes, not including this header
 *
 * Description:
 * Write a CIPSO header into the beginning of @buffer: option type, total
 * option length (header plus @len bytes of tags) and the 32 bit DOI.
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	put_unaligned_be32(doi_def->doi, &buf[2]);
}
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1.  The
 * actual buffer length may be larger than the indicated size due to the fact
 * that the CIPSO specification states that the minimum tag size is 10 bytes
 * for some deployed implementations.  Returns the size of the tag on success,
 * negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* a level is mandatory for this tag type */
	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* This will send packets using the "optimized" format when
		 * possible as specified in  section 3.4.2.6 of the
		 * CIPSO draft. */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr.  Return zero on success, negatives values on
 * failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything past the 4 byte tag base is the category bitmap */
	if (tag_len > 4) {
		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak a partially built catmap on failure */
			netlbl_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		/* an all-zero bitmap yields no catmap at all */
		if (secattr->attr.mls.cat)
			secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* a level is mandatory for this tag type */
	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr.  Return zero on success, negatives values on
 * failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything past the 4 byte tag base is the category list */
	if (tag_len > 4) {
		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			/* don't leak a partially built catmap on failure */
			netlbl_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5.  Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	/* a level is mandatory for this tag type */
	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr.  Return zero on success, negatives values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* anything past the 4 byte tag base is the list of ranges */
	if (tag_len > 4) {
		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* don't leak a partially built catmap on failure */
			netlbl_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		if (secattr->attr.mls.cat)
			secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag.  The tag carries the raw LSM
 * secid and is only meaningful on the local host (it is rejected off
 * loopback by cipso_v4_validate()).  Returns the size of the tag on success,
 * negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	/* host-local tag, so host byte order is fine here */
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr.
 * The tag's secid is read back in host byte order, mirroring
 * cipso_v4_gentag_loc().  Return zero on success, negatives values on
 * failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
/**
 * cipso_v4_optptr - Find the CIPSO option in the packet
 * @skb: the packet
 *
 * Description:
 * Parse the packet's IP header looking for a CIPSO option.  Returns a pointer
 * to the CIPSO option on success and NULL if one is not found (or the option
 * area is malformed).
 *
 */
unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	/* options start immediately after the fixed IP header */
	unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
	int optlen;
	int taglen;

	/* optlen > 1 because every non-NOOP/END option needs a length byte */
	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
		switch (optptr[0]) {
		case IPOPT_END:
			return NULL;
		case IPOPT_NOOP:
			taglen = 1;
			break;
		default:
			taglen = optptr[1];
		}
		/* reject zero-length or overrunning options */
		if (!taglen || taglen > optlen)
			return NULL;
		if (optptr[0] == IPOPT_CIPSO)
			return optptr;

		optlen -= taglen;
		optptr += taglen;
	}

	return NULL;
}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option, it checks all of the
 * fields to ensure that they are at least valid, see the draft snippet below
 * for details.  If the option is valid then a zero value is returned and
 * the value of @option is unchanged.  If the option is invalid then a
 * non-zero value is returned and @option is adjusted to point to the
 * offending portion of the option.  From the IETF draft ...
 *
 *  "If any field within the CIPSO options, such as the DOI identifier, is not
 *   recognized the IP datagram is discarded and an ICMP 'parameter problem'
 *   (type 12) is generated and returned.  The ICMP code field is set to 'bad
 *   parameter' (code 0) and the pointer is set to the start of the CIPSO field
 *   that is unrecognized."
 *
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* the option must at least hold the header plus a minimal tag */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (!doi_def) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		/* the tag type must be one the DOI definition allows */
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		/* make sure we can safely read the tag length byte */
		if (opt_iter + 1 == opt_len) {
			err_offset = opt_iter;
			goto validate_return_locked;
		}
		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* We are already going to do all the verification
			 * necessary at the socket layer so from our point of
			 * view it is safe to turn these checks off (and less
			 * work), however, the CIPSO draft says we should do
			 * all the CIPSO validations here but it doesn't
			 * really specify _exactly_ what we need to validate
			 * ... so, just make it a sysctl tunable. */
			if (cipso_v4_rbm_strictvalid) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* This is a non-standard tag that we only allow for
			 * local connections, so if the incoming interface is
			 * not the loopback device drop the packet. Further,
			 * there is no legitimate reason for setting this from
			 * userspace so reject it if skb is NULL. */
			if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	*option = opt + err_offset;
	return err_offset;
}
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
/**
 * cipso_v4_error - Send the correct response for a bad packet
 * @skb: the packet
 * @error: the error code
 * @gateway: CIPSO gateway flag
 *
 * Description:
 * Based on the error code given in @error, send an ICMP error message back to
 * the originating host.  From the IETF draft ...
 *
 *  "If the contents of the CIPSO [option] are valid but the security label is
 *   outside of the configured host or port label range, the datagram is
 *   discarded and an ICMP 'destination unreachable' (type 3) is generated and
 *   returned.  The code field of the ICMP is set to 'communication with
 *   destination network administratively prohibited' (code 9) or to
 *   'communication with destination host administratively prohibited'
 *   (code 10).  The value of the code is dependent on whether the originator
 *   of the ICMP message is acting as a CIPSO host or a CIPSO gateway. ..."
 *
 */
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;
	int res;

	/* never respond to an ICMP with an ICMP, and only for EACCES */
	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
		return;

	/* We might be called above the IP layer,
	 * so we can not use icmp_send and IPCB here.
	 * Instead we re-parse the options on a local copy so the
	 * CIPSO option is stripped from the ICMP payload. */
	memset(opt, 0, sizeof(struct ip_options));
	opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
	rcu_read_lock();
	res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
	rcu_read_unlock();

	if (res)
		return;

	if (gateway)
		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
	else
		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
}
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
/**
 * cipso_v4_genopt - Generate a CIPSO option
 * @buf: the option buffer
 * @buf_len: the size of opt_buf
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Generate a CIPSO option using the DOI definition and security attributes
 * passed to the function.  The DOI definition's tag list is tried in order
 * until one of the tag generators succeeds.  Returns the length of the
 * option on success and negative values on failure.
 *
 */
static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
			   const struct cipso_v4_doi *doi_def,
			   const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 iter;

	if (buf_len <= CIPSO_V4_HDR_LEN)
		return -ENOSPC;

	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	iter = 0;
	do {
		memset(buf, 0, buf_len);
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			ret_val = cipso_v4_gentag_rbm(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_ENUM:
			ret_val = cipso_v4_gentag_enum(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_RANGE:
			ret_val = cipso_v4_gentag_rng(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		case CIPSO_V4_TAG_LOCAL:
			ret_val = cipso_v4_gentag_loc(doi_def,
						   secattr,
						   &buf[CIPSO_V4_HDR_LEN],
						   buf_len - CIPSO_V4_HDR_LEN);
			break;
		default:
			return -EPERM;
		}

		iter++;
	} while (ret_val < 0 &&
		 iter < CIPSO_V4_TAG_MAXCNT &&
		 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
	if (ret_val < 0)
		return ret_val;
	cipso_v4_gentag_hdr(doi_def, buf, ret_val);
	return CIPSO_V4_HDR_LEN + ret_val;
}
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
/**
 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
 * @sk: the socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  The caller is responsible
 * for holding the socket lock (rcu_dereference_protected() below checks
 * lockdep_sock_is_held()).  Returns zero on success and negative values
 * on failure.
 *
 */
int cipso_v4_sock_setattr(struct sock *sk,
			  const struct cipso_v4_doi *doi_def,
			  const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *old, *opt = NULL;
	struct inet_sock *sk_inet;
	struct inet_connection_sock *sk_conn;

	/* In the case of sock_create_lite(), the sock->sk field is not
	 * defined yet but it is not a problem as the only users of these
	 * "lite" PF_INET sockets are functions which do an accept() call
	 * afterwards so we will label the socket as part of the accept().
	 * -- NOTE(review): inferred from the !sk early-success below;
	 * confirm against the callers. */
	if (!sk)
		return 0;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto socket_setattr_failure;
	buf_len = ret_val;

	/* Build the new ip_options_rcu struct ourselves; the option length
	 * is rounded up to the next 4 byte boundary (kzalloc() guarantees
	 * the padding bytes are zero, which the IP layer treats as the
	 * end-of-option-list marker). */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (!opt) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	sk_inet = inet_sk(sk);

	old = rcu_dereference_protected(sk_inet->inet_opt,
					lockdep_sock_is_held(sk));
	if (sk_inet->is_icsk) {
		sk_conn = inet_csk(sk);
		/* keep the cached extension-header length and MSS in sync
		 * with the option change */
		if (old)
			sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
		sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
	/* publish the new options before freeing the old ones via RCU */
	rcu_assign_pointer(sk_inet->inet_opt, opt);
	if (old)
		kfree_rcu(old, rcu);

	return 0;

socket_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
/**
 * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
 * @req: the connection request socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  Returns zero on success and
 * negative values on failure.
 *
 */
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *opt = NULL;
	struct inet_request_sock *req_inet;

	/* We allocate the maximum CIPSO option size here so we are probably
	 * being a little wasteful, but it makes our life _much_ easier later
	 * on and after all we are only talking about 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* Build the new ip_options_rcu struct; round the option length up
	 * to a 4 byte boundary — kzalloc() leaves the padding zeroed, which
	 * is the end-of-option-list marker for IP options. */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (!opt) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	/* swap in the new options atomically and free any previous ones
	 * after an RCU grace period */
	req_inet = inet_rsk(req);
	opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
	if (opt)
		kfree_rcu(opt, rcu);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options.  If other options
 * are present the CIPSO bytes are removed in place and the remaining
 * options compacted; otherwise the whole option struct is dropped via RCU.
 * Returns the number of bytes stripped from the IP header (zero if no
 * CIPSO option was present).  The caller must guarantee exclusive write
 * access to *@opt_ptr.
 *
 */
static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
{
	struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
	int hdr_delta = 0;

	if (!opt || opt->opt.cipso == 0)
		return 0;
	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
		u8 cipso_len;
		u8 cipso_off;
		unsigned char *cipso_ptr;
		int iter;
		int optlen_new;

		cipso_off = opt->opt.cipso - sizeof(struct iphdr);
		cipso_ptr = &opt->opt.__data[cipso_off];
		cipso_len = cipso_ptr[1];	/* option length byte */

		/* adjust the cached offsets of any option that followed the
		 * CIPSO option before clearing the CIPSO marker */
		if (opt->opt.srr > opt->opt.cipso)
			opt->opt.srr -= cipso_len;
		if (opt->opt.rr > opt->opt.cipso)
			opt->opt.rr -= cipso_len;
		if (opt->opt.ts > opt->opt.cipso)
			opt->opt.ts -= cipso_len;
		if (opt->opt.router_alert > opt->opt.cipso)
			opt->opt.router_alert -= cipso_len;
		opt->opt.cipso = 0;

		memmove(cipso_ptr, cipso_ptr + cipso_len,
			opt->opt.optlen - cipso_off - cipso_len);

		/* determining the new total option length is tricky because
		 * of the padding necessary, the only thing i can think to do
		 * at this point is walk the options one-by-one, skipping the
		 * padding at the end to determine the actual option size and
		 * from there we can determine the new total option length */
		iter = 0;
		optlen_new = 0;
		while (iter < opt->opt.optlen)
			if (opt->opt.__data[iter] != IPOPT_NOP) {
				iter += opt->opt.__data[iter + 1];
				optlen_new = iter;
			} else
				iter++;
		hdr_delta = opt->opt.optlen;
		/* round the remaining length back up to a 4 byte boundary */
		opt->opt.optlen = (optlen_new + 3) & ~3;
		hdr_delta -= opt->opt.optlen;
	} else {
		/* only the cipso option was present on the socket so we can
		 * remove the entire option struct */
		*opt_ptr = NULL;
		hdr_delta = opt->opt.optlen;
		kfree_rcu(opt, rcu);
	}

	return hdr_delta;
}
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046void cipso_v4_sock_delattr(struct sock *sk)
2047{
2048 struct inet_sock *sk_inet;
2049 int hdr_delta;
2050
2051 sk_inet = inet_sk(sk);
2052
2053 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2054 if (sk_inet->is_icsk && hdr_delta > 0) {
2055 struct inet_connection_sock *sk_conn = inet_csk(sk);
2056 sk_conn->icsk_ext_hdr_len -= hdr_delta;
2057 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
2058 }
2059}
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069void cipso_v4_req_delattr(struct request_sock *req)
2070{
2071 cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
2072}
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084int cipso_v4_getattr(const unsigned char *cipso,
2085 struct netlbl_lsm_secattr *secattr)
2086{
2087 int ret_val = -ENOMSG;
2088 u32 doi;
2089 struct cipso_v4_doi *doi_def;
2090
2091 if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
2092 return 0;
2093
2094 doi = get_unaligned_be32(&cipso[2]);
2095 rcu_read_lock();
2096 doi_def = cipso_v4_doi_search(doi);
2097 if (!doi_def)
2098 goto getattr_return;
2099
2100
2101
2102 switch (cipso[6]) {
2103 case CIPSO_V4_TAG_RBITMAP:
2104 ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
2105 break;
2106 case CIPSO_V4_TAG_ENUM:
2107 ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
2108 break;
2109 case CIPSO_V4_TAG_RANGE:
2110 ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
2111 break;
2112 case CIPSO_V4_TAG_LOCAL:
2113 ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
2114 break;
2115 }
2116 if (ret_val == 0)
2117 secattr->type = NETLBL_NLTYPE_CIPSOV4;
2118
2119getattr_return:
2120 rcu_read_unlock();
2121 return ret_val;
2122}
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
2137{
2138 struct ip_options_rcu *opt;
2139 int res = -ENOMSG;
2140
2141 rcu_read_lock();
2142 opt = rcu_dereference(inet_sk(sk)->inet_opt);
2143 if (opt && opt->opt.cipso)
2144 res = cipso_v4_getattr(opt->opt.__data +
2145 opt->opt.cipso -
2146 sizeof(struct iphdr),
2147 secattr);
2148 rcu_read_unlock();
2149 return res;
2150}
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes,
 * growing or shrinking the IP header in place as needed.  Returns a pointer
 * to the IP header on success and NULL on failure.
 * NOTE(review): the "returns a pointer" wording above mirrors similar
 * helpers; this function actually returns zero on success and negative
 * values on failure — confirm against callers.
 *
 */
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
			    const struct cipso_v4_doi *doi_def,
			    const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
	u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
	u32 opt_len;
	int len_delta;

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		return ret_val;
	buf_len = ret_val;
	/* round up to a 4 byte boundary; padding is filled below */
	opt_len = (buf_len + 3) & ~3;

	/* we overwrite any existing options to ensure that we have enough
	 * room for the CIPSO option, the reason is that we _need_ to guarantee
	 * that the security label is applied to the packet - we do the same
	 * thing when using the socket options and it hasn't caused a problem,
	 * if we need to we can always revisit this choice later */

	len_delta = opt_len - opt->optlen;
	/* if we don't ensure enough headroom we could panic on the skb_push()
	 * call below so make sure we have enough, we are also "mangling" the
	 * packet so we should probably do a copy-on-write call anyway */
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	if (len_delta > 0) {
		/* we assume that the header + opt->optlen are all in the same
		 * skb fragment, otherwise this is going to fail (and pollute
		 * the packet) */
		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		memmove((char *)iph - len_delta, iph, iph->ihl << 2);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
	} else if (len_delta < 0) {
		iph = ip_hdr(skb);
		/* NOP-out the old, larger option area before rewriting it */
		memset(iph + 1, IPOPT_NOP, opt->optlen);
	} else
		iph = ip_hdr(skb);

	if (opt->optlen > 0)
		memset(opt, 0, sizeof(*opt));
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	opt->is_changed = 1;

	/* we have to do the following because we are being called from a
	 * netfilter hook which means the packet already has had the header
	 * fields populated and the checksum calculated - yes this means we
	 * are doing more work than needed but we do it to keep the core
	 * stack clean and tidy */
	memcpy(iph + 1, buf, buf_len);
	if (opt_len > buf_len)
		memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
	if (len_delta != 0) {
		iph->ihl = 5 + (opt_len >> 2);
		iph->tot_len = htons(skb->len);
	}
	ip_send_check(iph);

	return 0;
}
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241int cipso_v4_skbuff_delattr(struct sk_buff *skb)
2242{
2243 int ret_val;
2244 struct iphdr *iph;
2245 struct ip_options *opt = &IPCB(skb)->opt;
2246 unsigned char *cipso_ptr;
2247
2248 if (opt->cipso == 0)
2249 return 0;
2250
2251
2252 ret_val = skb_cow(skb, skb_headroom(skb));
2253 if (ret_val < 0)
2254 return ret_val;
2255
2256
2257
2258
2259
2260 iph = ip_hdr(skb);
2261 cipso_ptr = (unsigned char *)iph + opt->cipso;
2262 memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
2263 opt->cipso = 0;
2264 opt->is_changed = 1;
2265
2266 ip_send_check(iph);
2267
2268 return 0;
2269}
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283static int __init cipso_v4_init(void)
2284{
2285 int ret_val;
2286
2287 ret_val = cipso_v4_cache_init();
2288 if (ret_val != 0)
2289 panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
2290 ret_val);
2291
2292 return 0;
2293}
2294
2295subsys_initcall(cipso_v4_init);
2296