1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39#include <linux/init.h>
40#include <linux/types.h>
41#include <linux/rcupdate.h>
42#include <linux/list.h>
43#include <linux/spinlock.h>
44#include <linux/string.h>
45#include <linux/jhash.h>
46#include <linux/audit.h>
47#include <net/ip.h>
48#include <net/icmp.h>
49#include <net/tcp.h>
50#include <net/netlabel.h>
51#include <net/cipso_ipv4.h>
52#include <asm/atomic.h>
53#include <asm/bug.h>
54#include <asm/unaligned.h>
55
56
57
58
59
60
/* List of available DOI definitions; the lock serializes writers while
 * readers traverse the list under RCU protection. */
static DEFINE_SPINLOCK(cipso_v4_doi_list_lock);
static LIST_HEAD(cipso_v4_doi_list);

/* Label-mapping cache controls (runtime tunables) */
int cipso_v4_cache_enabled = 1;
int cipso_v4_cache_bucketsize = 10;
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS)
#define CIPSO_V4_CACHE_REORDERLIMIT 10
/* One hash bucket of the mapping cache */
struct cipso_v4_map_cache_bkt {
	spinlock_t lock;	/* protects @size and @list */
	u32 size;		/* number of entries currently on @list */
	struct list_head list;	/* entry list, hottest entries near head */
};
/* A single cached "raw CIPSO option" -> "LSM data" mapping */
struct cipso_v4_map_cache_entry {
	u32 hash;		/* jhash of @key, cheap pre-filter on lookup */
	unsigned char *key;	/* kmemdup'd copy of the raw CIPSO option */
	size_t key_len;

	struct netlbl_lsm_cache *lsm_data;	/* refcounted LSM payload */

	u32 activity;		/* hit counter driving list reordering */
	struct list_head list;
};
static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL;
86
87
/* Restricted bitmap (tag #1) tunables: optimized option format and strict
 * validation of incoming tags (see cipso_v4_validate()). */
int cipso_v4_rbm_optfmt = 0;
int cipso_v4_rbm_strictvalid = 1;

/*
 * Protocol Constants
 */

/* Maximum size of the CIPSO IP option in bytes. */
#define CIPSO_V4_OPT_LEN_MAX 40

/* Length of the base CIPSO option header: type (1 byte), length (1 byte),
 * DOI (4 bytes) -- see cipso_v4_gentag_hdr(). */
#define CIPSO_V4_HDR_LEN 6

/* Base (minimum) length of the restricted bitmap tag, tag #1. */
#define CIPSO_V4_TAG_RBM_BLEN 4

/* Base (minimum) length of the enumerated tag, tag #2. */
#define CIPSO_V4_TAG_ENUM_BLEN 4

/* Base (minimum) length of the ranged tag, tag #5. */
#define CIPSO_V4_TAG_RNG_BLEN 4

/* Maximum number of category ranges carried in a ranged tag; bounds the
 * on-stack pair array in cipso_v4_map_cat_rng_hton(). */
#define CIPSO_V4_TAG_RNG_CAT_MAX 8

/* Exact length of the local tag: 2 byte header plus a 4 byte secid.
 * This tag is non-standard and only valid on loopback
 * (see cipso_v4_validate()). */
#define CIPSO_V4_TAG_LOC_BLEN 6
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147static int cipso_v4_bitmap_walk(const unsigned char *bitmap,
148 u32 bitmap_len,
149 u32 offset,
150 u8 state)
151{
152 u32 bit_spot;
153 u32 byte_offset;
154 unsigned char bitmask;
155 unsigned char byte;
156
157
158 byte_offset = offset / 8;
159 byte = bitmap[byte_offset];
160 bit_spot = offset;
161 bitmask = 0x80 >> (offset % 8);
162
163 while (bit_spot < bitmap_len) {
164 if ((state && (byte & bitmask) == bitmask) ||
165 (state == 0 && (byte & bitmask) == 0))
166 return bit_spot;
167
168 bit_spot++;
169 bitmask >>= 1;
170 if (bitmask == 0) {
171 byte = bitmap[++byte_offset];
172 bitmask = 0x80;
173 }
174 }
175
176 return -1;
177}
178
179
180
181
182
183
184
185
186
187
188
189static void cipso_v4_bitmap_setbit(unsigned char *bitmap,
190 u32 bit,
191 u8 state)
192{
193 u32 byte_spot;
194 u8 bitmask;
195
196
197 byte_spot = bit / 8;
198 bitmask = 0x80 >> (bit % 8);
199 if (state)
200 bitmap[byte_spot] |= bitmask;
201 else
202 bitmap[byte_spot] &= ~bitmask;
203}
204
205
206
207
208
209
210
211
212
213
/**
 * cipso_v4_cache_entry_free - Frees a cache entry
 * @entry: the entry to free
 *
 * Description:
 * Frees a cache entry, dropping the entry's reference on the attached LSM
 * data (netlbl_secattr_cache_free() presumably releases it when the last
 * reference goes away -- verify against the netlabel API) and releasing the
 * copied key.
 */
static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
{
	if (entry->lsm_data)
		netlbl_secattr_cache_free(entry->lsm_data);
	kfree(entry->key);
	kfree(entry);
}
221
222
223
224
225
226
227
228
229
230
231static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
232{
233 return jhash(key, key_len, 0);
234}
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249static int cipso_v4_cache_init(void)
250{
251 u32 iter;
252
253 cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
254 sizeof(struct cipso_v4_map_cache_bkt),
255 GFP_KERNEL);
256 if (cipso_v4_cache == NULL)
257 return -ENOMEM;
258
259 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
260 spin_lock_init(&cipso_v4_cache[iter].lock);
261 cipso_v4_cache[iter].size = 0;
262 INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
263 }
264
265 return 0;
266}
267
268
269
270
271
272
273
274
275
276void cipso_v4_cache_invalidate(void)
277{
278 struct cipso_v4_map_cache_entry *entry, *tmp_entry;
279 u32 iter;
280
281 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
282 spin_lock_bh(&cipso_v4_cache[iter].lock);
283 list_for_each_entry_safe(entry,
284 tmp_entry,
285 &cipso_v4_cache[iter].list, list) {
286 list_del(&entry->list);
287 cipso_v4_cache_entry_free(entry);
288 }
289 cipso_v4_cache[iter].size = 0;
290 spin_unlock_bh(&cipso_v4_cache[iter].lock);
291 }
292
293 return;
294}
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
/**
 * cipso_v4_cache_check - Check the CIPSO cache for a mapping
 * @key: the buffer to check (the raw CIPSO option)
 * @key_len: buffer length in bytes
 * @secattr: the security attribute struct to populate on a hit
 *
 * Description:
 * Looks @key up in the mapping cache.  On a hit the cached LSM data is
 * attached to @secattr (taking an extra reference on it) and zero is
 * returned; -ENOENT is returned on a miss or when the cache is disabled.
 * As a side effect hot entries slowly bubble toward the head of their
 * bucket: when a matched entry's activity exceeds its predecessor's by more
 * than CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped.
 */
static int cipso_v4_cache_check(const unsigned char *key,
				u32 key_len,
				struct netlbl_lsm_secattr *secattr)
{
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry;
	struct cipso_v4_map_cache_entry *prev_entry = NULL;
	u32 hash;

	if (!cipso_v4_cache_enabled)
		return -ENOENT;

	hash = cipso_v4_map_cache_hash(key, key_len);
	/* bucket count is a power of two, so masking selects the bucket */
	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
		if (entry->hash == hash &&
		    entry->key_len == key_len &&
		    memcmp(entry->key, key, key_len) == 0) {
			entry->activity += 1;
			atomic_inc(&entry->lsm_data->refcount);
			secattr->cache = entry->lsm_data;
			secattr->flags |= NETLBL_SECATTR_CACHE;
			secattr->type = NETLBL_NLTYPE_CIPSOV4;
			if (prev_entry == NULL) {
				/* already at the bucket head, no reorder */
				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
				return 0;
			}

			/* decay the predecessor and swap the two entries if
			 * this one is sufficiently more active */
			if (prev_entry->activity > 0)
				prev_entry->activity -= 1;
			if (entry->activity > prev_entry->activity &&
			    entry->activity - prev_entry->activity >
			    CIPSO_V4_CACHE_REORDERLIMIT) {
				__list_del(entry->list.prev, entry->list.next);
				__list_add(&entry->list,
					   prev_entry->list.prev,
					   &prev_entry->list);
			}

			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
			return 0;
		}
		prev_entry = entry;
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return -ENOENT;
}
367
368
369
370
371
372
373
374
375
376
377
378
379
380
/**
 * cipso_v4_cache_add - Add an entry to the CIPSO cache
 * @skb: the packet
 * @secattr: the packet's security attributes
 *
 * Description:
 * Adds a new mapping to the cache to speed up future lookups.  The key is a
 * copy of the raw CIPSO option in @skb (CIPSO_V4_OPTPTR presumably returns
 * a pointer to the option inside the packet -- verify against its
 * definition) and the value is the refcounted LSM blob in @secattr.  When
 * the target bucket is full, the entry at the tail of the bucket list (the
 * coldest one, given the reordering done in cipso_v4_cache_check()) is
 * evicted.  Returns zero on success, negative values on failure; a disabled
 * cache counts as success.
 */
int cipso_v4_cache_add(const struct sk_buff *skb,
		       const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry = NULL;
	struct cipso_v4_map_cache_entry *old_entry = NULL;
	unsigned char *cipso_ptr;
	u32 cipso_ptr_len;

	if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
		return 0;

	cipso_ptr = CIPSO_V4_OPTPTR(skb);
	/* byte 1 of the option is its total length */
	cipso_ptr_len = cipso_ptr[1];

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL)
		return -ENOMEM;
	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
	if (entry->key == NULL) {
		ret_val = -ENOMEM;
		goto cache_add_failure;
	}
	entry->key_len = cipso_ptr_len;
	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
	/* the cache entry holds its own reference on the LSM data */
	atomic_inc(&secattr->cache->refcount);
	entry->lsm_data = secattr->cache;

	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache[bkt].size += 1;
	} else {
		/* bucket full: evict the tail (coldest) entry */
		old_entry = list_entry(cipso_v4_cache[bkt].list.prev,
				       struct cipso_v4_map_cache_entry, list);
		list_del(&old_entry->list);
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache_entry_free(old_entry);
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return 0;

cache_add_failure:
	if (entry)
		cipso_v4_cache_entry_free(entry);
	return ret_val;
}
431
432
433
434
435
436
437
438
439
440
441
442
443
444
/**
 * cipso_v4_doi_search - Searches for a DOI definition
 * @doi: the DOI to search for
 *
 * Description:
 * Searches the DOI definition list for a live (refcount != 0) definition
 * matching @doi.  The caller must protect the traversal, either by holding
 * cipso_v4_doi_list_lock or the RCU read lock.  Returns a pointer to the
 * matching definition, NULL if none is found.
 */
static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
{
	struct cipso_v4_doi *iter;

	list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
		if (iter->doi == doi && atomic_read(&iter->refcount))
			return iter;
	return NULL;
}
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
469 struct netlbl_audit *audit_info)
470{
471 int ret_val = -EINVAL;
472 u32 iter;
473 u32 doi;
474 u32 doi_type;
475 struct audit_buffer *audit_buf;
476
477 doi = doi_def->doi;
478 doi_type = doi_def->type;
479
480 if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
481 goto doi_add_return;
482 for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
483 switch (doi_def->tags[iter]) {
484 case CIPSO_V4_TAG_RBITMAP:
485 break;
486 case CIPSO_V4_TAG_RANGE:
487 case CIPSO_V4_TAG_ENUM:
488 if (doi_def->type != CIPSO_V4_MAP_PASS)
489 goto doi_add_return;
490 break;
491 case CIPSO_V4_TAG_LOCAL:
492 if (doi_def->type != CIPSO_V4_MAP_LOCAL)
493 goto doi_add_return;
494 break;
495 case CIPSO_V4_TAG_INVALID:
496 if (iter == 0)
497 goto doi_add_return;
498 break;
499 default:
500 goto doi_add_return;
501 }
502 }
503
504 atomic_set(&doi_def->refcount, 1);
505
506 spin_lock(&cipso_v4_doi_list_lock);
507 if (cipso_v4_doi_search(doi_def->doi) != NULL) {
508 spin_unlock(&cipso_v4_doi_list_lock);
509 ret_val = -EEXIST;
510 goto doi_add_return;
511 }
512 list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
513 spin_unlock(&cipso_v4_doi_list_lock);
514 ret_val = 0;
515
516doi_add_return:
517 audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
518 if (audit_buf != NULL) {
519 const char *type_str;
520 switch (doi_type) {
521 case CIPSO_V4_MAP_TRANS:
522 type_str = "trans";
523 break;
524 case CIPSO_V4_MAP_PASS:
525 type_str = "pass";
526 break;
527 case CIPSO_V4_MAP_LOCAL:
528 type_str = "local";
529 break;
530 default:
531 type_str = "(unknown)";
532 }
533 audit_log_format(audit_buf,
534 " cipso_doi=%u cipso_type=%s res=%u",
535 doi, type_str, ret_val == 0 ? 1 : 0);
536 audit_log_end(audit_buf);
537 }
538
539 return ret_val;
540}
541
542
543
544
545
546
547
548
549
550void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
551{
552 if (doi_def == NULL)
553 return;
554
555 switch (doi_def->type) {
556 case CIPSO_V4_MAP_TRANS:
557 kfree(doi_def->map.std->lvl.cipso);
558 kfree(doi_def->map.std->lvl.local);
559 kfree(doi_def->map.std->cat.cipso);
560 kfree(doi_def->map.std->cat.local);
561 break;
562 }
563 kfree(doi_def);
564}
565
566
567
568
569
570
571
572
573
574
575
/**
 * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
 * @entry: the RCU callback head embedded in the DOI definition
 *
 * Description:
 * RCU callback wrapper around cipso_v4_doi_free(); recovers the enclosing
 * DOI definition from its rcu_head and frees it once the grace period has
 * elapsed.
 */
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
	struct cipso_v4_doi *doi_def;

	doi_def = container_of(entry, struct cipso_v4_doi, rcu);
	cipso_v4_doi_free(doi_def);
}
583
584
585
586
587
588
589
590
591
592
593
594
/**
 * cipso_v4_doi_remove - Removes a DOI definition
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes the DOI definition matching @doi.  Removal only succeeds when the
 * initial reference (taken at add time) is the last one outstanding;
 * otherwise -EBUSY is returned and the dropped reference stands in for the
 * removal request.  On success the mapping cache is invalidated and the
 * definition is freed after an RCU grace period.  An audit record is
 * emitted on every path.  Returns zero on success, negative on failure.
 */
int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
{
	int ret_val;
	struct cipso_v4_doi *doi_def;
	struct audit_buffer *audit_buf;

	spin_lock(&cipso_v4_doi_list_lock);
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -ENOENT;
		goto doi_remove_return;
	}
	/* only the initial reference may remain for removal to proceed */
	if (!atomic_dec_and_test(&doi_def->refcount)) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EBUSY;
		goto doi_remove_return;
	}
	list_del_rcu(&doi_def->list);
	spin_unlock(&cipso_v4_doi_list_lock);

	/* stale cache entries may still reference this DOI's mappings */
	cipso_v4_cache_invalidate();
	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
	ret_val = 0;

doi_remove_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
	if (audit_buf != NULL) {
		audit_log_format(audit_buf,
				 " cipso_doi=%u res=%u",
				 doi, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
631
632
633
634
635
636
637
638
639
640
641
642
643struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
644{
645 struct cipso_v4_doi *doi_def;
646
647 rcu_read_lock();
648 doi_def = cipso_v4_doi_search(doi);
649 if (doi_def == NULL)
650 goto doi_getdef_return;
651 if (!atomic_inc_not_zero(&doi_def->refcount))
652 doi_def = NULL;
653
654doi_getdef_return:
655 rcu_read_unlock();
656 return doi_def;
657}
658
659
660
661
662
663
664
665
666
/**
 * cipso_v4_doi_putdef - Releases a reference to a DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * Drops a reference obtained via cipso_v4_doi_getdef().  When the last
 * reference goes away the definition is unlinked from the DOI list, the
 * mapping cache is invalidated, and the definition is freed once an RCU
 * grace period has elapsed.  Safe to call with NULL.
 */
void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
{
	if (doi_def == NULL)
		return;

	if (!atomic_dec_and_test(&doi_def->refcount))
		return;
	spin_lock(&cipso_v4_doi_list_lock);
	list_del_rcu(&doi_def->list);
	spin_unlock(&cipso_v4_doi_list_lock);

	cipso_v4_cache_invalidate();
	call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
}
681
682
683
684
685
686
687
688
689
690
691
692
693
694
/**
 * cipso_v4_doi_walk - Iterate through the DOI definitions
 * @skip_cnt: skip past this number of DOI definitions, updated on return
 * @callback: callback invoked for each DOI definition
 * @cb_arg: argument passed to the callback function
 *
 * Description:
 * Iterates over the live DOI definitions, skipping the first @skip_cnt of
 * them, and invokes @callback on each.  Iteration stops when the callback
 * returns a negative value, which is then returned to the caller; in every
 * case @skip_cnt is updated to the number of definitions fully processed.
 * Returns -ENOENT when no definition beyond @skip_cnt exists.
 */
int cipso_v4_doi_walk(u32 *skip_cnt,
		     int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
		     void *cb_arg)
{
	int ret_val = -ENOENT;
	u32 doi_cnt = 0;
	struct cipso_v4_doi *iter_doi;

	rcu_read_lock();
	list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list)
		if (atomic_read(&iter_doi->refcount) > 0) {
			if (doi_cnt++ < *skip_cnt)
				continue;
			ret_val = callback(iter_doi, cb_arg);
			if (ret_val < 0) {
				/* the failed entry does not count as done */
				doi_cnt--;
				goto doi_walk_return;
			}
		}

doi_walk_return:
	rcu_read_unlock();
	*skip_cnt = doi_cnt;
	return ret_val;
}
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
737{
738 switch (doi_def->type) {
739 case CIPSO_V4_MAP_PASS:
740 return 0;
741 case CIPSO_V4_MAP_TRANS:
742 if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
743 return 0;
744 break;
745 }
746
747 return -EFAULT;
748}
749
750
751
752
753
754
755
756
757
758
759
760
761
762static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
763 u32 host_lvl,
764 u32 *net_lvl)
765{
766 switch (doi_def->type) {
767 case CIPSO_V4_MAP_PASS:
768 *net_lvl = host_lvl;
769 return 0;
770 case CIPSO_V4_MAP_TRANS:
771 if (host_lvl < doi_def->map.std->lvl.local_size &&
772 doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
773 *net_lvl = doi_def->map.std->lvl.local[host_lvl];
774 return 0;
775 }
776 return -EPERM;
777 }
778
779 return -EINVAL;
780}
781
782
783
784
785
786
787
788
789
790
791
792
793
794static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
795 u32 net_lvl,
796 u32 *host_lvl)
797{
798 struct cipso_v4_std_map_tbl *map_tbl;
799
800 switch (doi_def->type) {
801 case CIPSO_V4_MAP_PASS:
802 *host_lvl = net_lvl;
803 return 0;
804 case CIPSO_V4_MAP_TRANS:
805 map_tbl = doi_def->map.std;
806 if (net_lvl < map_tbl->lvl.cipso_size &&
807 map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) {
808 *host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
809 return 0;
810 }
811 return -EPERM;
812 }
813
814 return -EINVAL;
815}
816
817
818
819
820
821
822
823
824
825
826
827
828
/**
 * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid
 * @doi_def: the DOI definition
 * @bitmap: category bitmap
 * @bitmap_len: bitmap length in bytes
 *
 * Description:
 * Checks the given category bitmap against the given DOI definition.
 * Pass-through mappings accept any bitmap; translated mappings require
 * every set bit to have a valid entry in the CIPSO category table.
 * Returns zero if all categories are valid, -EFAULT otherwise.
 */
static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *bitmap,
				      u32 bitmap_len)
{
	int cat = -1;
	u32 bitmap_len_bits = bitmap_len * 8;
	u32 cipso_cat_size;
	u32 *cipso_array;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		return 0;
	case CIPSO_V4_MAP_TRANS:
		cipso_cat_size = doi_def->map.std->cat.cipso_size;
		cipso_array = doi_def->map.std->cat.cipso;
		for (;;) {
			/* find the next set bit; negative means done */
			cat = cipso_v4_bitmap_walk(bitmap,
						   bitmap_len_bits,
						   cat + 1,
						   1);
			if (cat < 0)
				break;
			if (cat >= cipso_cat_size ||
			    cipso_array[cat] >= CIPSO_V4_INV_CAT)
				return -EFAULT;
		}

		if (cat == -1)
			return 0;
		break;
	}

	return -EFAULT;
}
863
864
865
866
867
868
869
870
871
872
873
874
875
876
/**
 * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes holding the host category map
 * @net_cat: the zeroed buffer receiving the CIPSO restricted bitmap
 * @net_cat_len: the length of @net_cat in bytes
 *
 * Description:
 * Walks the host category map in @secattr and sets the corresponding bits
 * in the network bitmap @net_cat, translating each category through the
 * local category table for translated mappings.  Returns the number of
 * bytes of @net_cat actually used (enough to cover the highest set bit),
 * or a negative value on failure.
 */
static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int host_spot = -1;
	u32 net_spot = CIPSO_V4_INV_CAT;
	u32 net_spot_max = 0;
	u32 net_clen_bits = net_cat_len * 8;
	u32 host_cat_size = 0;
	u32 *host_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		host_cat_size = doi_def->map.std->cat.local_size;
		host_cat_array = doi_def->map.std->cat.local;
	}

	for (;;) {
		/* walk the host category map one set category at a time */
		host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						       host_spot + 1);
		if (host_spot < 0)
			break;

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			net_spot = host_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (host_spot >= host_cat_size)
				return -EPERM;
			net_spot = host_cat_array[host_spot];
			if (net_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		if (net_spot >= net_clen_bits)
			return -ENOSPC;
		cipso_v4_bitmap_setbit(net_cat, net_spot, 1);

		if (net_spot > net_spot_max)
			net_spot_max = net_spot;
	}

	/* return the byte count rounded up to cover the highest bit */
	if (++net_spot_max % 8)
		return net_spot_max / 8 + 1;
	return net_spot_max / 8;
}
924
925
926
927
928
929
930
931
932
933
934
935
936
937
/**
 * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the CIPSO restricted bitmap
 * @net_cat_len: length of @net_cat in bytes
 * @secattr: the security attributes receiving the host category map
 *
 * Description:
 * Walks the network category bitmap and sets the corresponding categories
 * in the host category map of @secattr, translating each category through
 * the CIPSO category table for translated mappings.  Returns zero on
 * success, negative values on failure.
 */
static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	int net_spot = -1;
	u32 host_spot = CIPSO_V4_INV_CAT;
	u32 net_clen_bits = net_cat_len * 8;
	u32 net_cat_size = 0;
	u32 *net_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		net_cat_size = doi_def->map.std->cat.cipso_size;
		net_cat_array = doi_def->map.std->cat.cipso;
	}

	for (;;) {
		net_spot = cipso_v4_bitmap_walk(net_cat,
						net_clen_bits,
						net_spot + 1,
						1);
		if (net_spot < 0) {
			/* NOTE(review): cipso_v4_bitmap_walk() in this file
			 * only ever returns >= 0 or -1, so the -2 branch
			 * below looks unreachable -- confirm before relying
			 * on the -EFAULT path */
			if (net_spot == -2)
				return -EFAULT;
			return 0;
		}

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			host_spot = net_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (net_spot >= net_cat_size)
				return -EPERM;
			host_spot = net_cat_array[net_spot];
			if (host_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
						       host_spot,
						       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return -EINVAL;
}
987
988
989
990
991
992
993
994
995
996
997
998
999
1000static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
1001 const unsigned char *enumcat,
1002 u32 enumcat_len)
1003{
1004 u16 cat;
1005 int cat_prev = -1;
1006 u32 iter;
1007
1008 if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
1009 return -EFAULT;
1010
1011 for (iter = 0; iter < enumcat_len; iter += 2) {
1012 cat = get_unaligned_be16(&enumcat[iter]);
1013 if (cat <= cat_prev)
1014 return -EFAULT;
1015 cat_prev = cat;
1016 }
1017
1018 return 0;
1019}
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
/**
 * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes holding the host category map
 * @net_cat: the buffer receiving the big-endian u16 category list
 * @net_cat_len: the length of @net_cat in bytes
 *
 * Description:
 * Walks the host category map and writes each category into @net_cat as a
 * network byte order 16-bit value.  Only pass-through DOIs use this tag, so
 * no table translation is done.  Returns the number of bytes written, or
 * -ENOSPC when the buffer is too small.
 */
static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
				      const struct netlbl_lsm_secattr *secattr,
				      unsigned char *net_cat,
				      u32 net_cat_len)
{
	int cat = -1;
	u32 cat_iter = 0;

	for (;;) {
		cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						 cat + 1);
		if (cat < 0)
			break;
		if ((cat_iter + 2) > net_cat_len)
			return -ENOSPC;

		/* NOTE(review): this store assumes &net_cat[cat_iter] can
		 * take an unaligned 16-bit write -- acceptable on x86 but
		 * confirm for strict-alignment architectures */
		*((__be16 *)&net_cat[cat_iter]) = htons(cat);
		cat_iter += 2;
	}

	return cat_iter;
}
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
1072 const unsigned char *net_cat,
1073 u32 net_cat_len,
1074 struct netlbl_lsm_secattr *secattr)
1075{
1076 int ret_val;
1077 u32 iter;
1078
1079 for (iter = 0; iter < net_cat_len; iter += 2) {
1080 ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
1081 get_unaligned_be16(&net_cat[iter]),
1082 GFP_ATOMIC);
1083 if (ret_val != 0)
1084 return ret_val;
1085 }
1086
1087 return 0;
1088}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
/**
 * cipso_v4_map_cat_rng_valid - Checks to see if the ranged tag is valid
 * @doi_def: the DOI definition
 * @rngcat: the ranged category list (big-endian u16 high/low pairs)
 * @rngcat_len: the length of the category list in bytes
 *
 * Description:
 * Checks the ranged category list against the DOI definition; only
 * pass-through mappings are valid for this tag.  Entries are (high, low)
 * pairs of 16-bit values; the final pair may omit its low value, in which
 * case it defaults to zero -- hence the length need only be a multiple of
 * two, not four.  The high values must be non-increasing relative to the
 * previous pair's low value.  Returns zero if valid, -EFAULT otherwise.
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		/* a missing trailing low value defaults to category 0 */
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes holding the host category map
 * @net_cat: the buffer receiving the ranged category pairs
 * @net_cat_len: the length of @net_cat in bytes
 *
 * Description:
 * Converts the host category map into ranged tag format: (low, high) range
 * pairs collected into a scratch array in increasing order, then written
 * out in reverse so the wire format carries descending (high, low) pairs.
 * A low value of zero is elided from the output (it is implicit, see
 * cipso_v4_map_cat_rng_valid()).  Returns the number of bytes of @net_cat
 * consumed, or a negative value on failure.
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* cap the buffer to what a ranged tag can ever carry inside one
	 * CIPSO option so the fixed-size scratch array cannot overflow */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	for (;;) {
		/* range start (low); a low of 0 costs no wire bytes */
		iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						  iter + 1);
		if (iter < 0)
			break;
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		/* range end (high) */
		iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat,
						      iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	/* emit pairs in reverse: high first, then low unless it is zero */
	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
1206 const unsigned char *net_cat,
1207 u32 net_cat_len,
1208 struct netlbl_lsm_secattr *secattr)
1209{
1210 int ret_val;
1211 u32 net_iter;
1212 u16 cat_low;
1213 u16 cat_high;
1214
1215 for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
1216 cat_high = get_unaligned_be16(&net_cat[net_iter]);
1217 if ((net_iter + 4) <= net_cat_len)
1218 cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
1219 else
1220 cat_low = 0;
1221
1222 ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat,
1223 cat_low,
1224 cat_high,
1225 GFP_ATOMIC);
1226 if (ret_val != 0)
1227 return ret_val;
1228 }
1229
1230 return 0;
1231}
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
/**
 * cipso_v4_gentag_hdr - Write a CIPSO option header to a buffer
 * @doi_def: the DOI definition
 * @buf: the option buffer (must be at least CIPSO_V4_HDR_LEN bytes)
 * @len: the length of the tag(s) following the header, in bytes
 *
 * Description:
 * Writes the option type byte, the total option length, and the 32-bit DOI
 * in network byte order into the start of @buf.
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	/* NOTE(review): assumes &buf[2] tolerates an unaligned 32-bit
	 * store -- confirm for strict-alignment architectures */
	*(__be32 *)&buf[2] = htonl(doi_def->doi);
}
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer (caller pre-zeroes it, see cipso_v4_genopt())
 * @buffer_len: length of @buffer in bytes
 *
 * Description:
 * Generates a restricted bitmap tag from the level and (optional) category
 * information in @secattr.  Returns the size of the tag on success,
 * negative values on failure.
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* when the "optimized" option format is enabled, small
		 * bitmaps (<= 10 bytes) are padded out to a fixed 14-byte
		 * tag; the pad bytes are already zero because the caller
		 * zeroed the buffer */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag (type, length, alignment, level, bitmap...)
 * @secattr: the security attributes to populate
 *
 * Description:
 * Parses a restricted bitmap tag, filling in the level and category
 * information in @secattr.  Returns zero on success, negative values on
 * failure.
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* any bytes past the 4-byte base are the category bitmap */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
		                       netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			/* NOTE(review): the freed catmap pointer is left in
			 * @secattr -- callers must not reuse it on error */
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of @buffer in bytes
 *
 * Description:
 * Generates an enumerated tag from the level and (optional) category
 * information in @secattr.  Returns the size of the tag on success,
 * negative values on failure.
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes to populate
 *
 * Description:
 * Parses an enumerated tag, filling in the level and category information
 * in @secattr.  Returns zero on success, negative values on failure.
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* any bytes past the 4-byte base are the u16 category list */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
			                netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of @buffer in bytes
 *
 * Description:
 * Generates a ranged tag from the level and (optional) category
 * information in @secattr.  Returns the size of the tag on success,
 * negative values on failure.
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes to populate
 *
 * Description:
 * Parses a ranged tag, filling in the level and category information in
 * @secattr.  Returns zero on success, negative values on failure.
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	/* any bytes past the 4-byte base are the (high, low) range pairs */
	if (tag_len > 4) {
		secattr->attr.mls.cat =
			                netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of @buffer in bytes
 *
 * Description:
 * Generates a local tag carrying the raw LSM secid from @secattr in host
 * byte order (this tag never leaves the host, see cipso_v4_validate()'s
 * loopback-only check).  Returns the size of the tag on success, -EPERM
 * when no secid is present.
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes to populate
 *
 * Description:
 * Parses a local tag, copying the embedded host byte order secid into
 * @secattr (mirror of cipso_v4_gentag_loc()).  Returns zero on success.
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet
 * @option: in: pointer to the option; out: pointer to the first bad byte
 *
 * Description:
 * Validates a CIPSO option: minimum length, a known DOI, and per-tag type
 * and length checks; for the restricted bitmap tag the level/category
 * mapping is also validated when cipso_v4_rbm_strictvalid is set.  Returns
 * zero if the option is valid; otherwise returns the offset of the first
 * offending byte within the option (also written back through @option),
 * which the caller can use to build an ICMP parameter problem message.
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* the option must hold at least the 6-byte header plus a minimal
	 * tag; opt[1] is the option's self-declared total length */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	rcu_read_lock();
	/* bytes 2-5 of the option carry the DOI in network byte order */
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (doi_def == NULL) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		/* the tag type must appear in the DOI's configured tag
		 * list; CIPSO_V4_TAG_INVALID terminates that list early */
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* full level/category validation of the restricted
			 * bitmap tag is optional; when strict validation is
			 * disabled only the length checks above apply */
			if (cipso_v4_rbm_strictvalid) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* the local tag is non-standard and carries a raw
			 * LSM secid, so it is only accepted from the
			 * loopback interface */
			if (!(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	*option = opt + err_offset;
	return err_offset;
}
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
1783{
1784 if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
1785 return;
1786
1787 if (gateway)
1788 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
1789 else
1790 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
1791}
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
1807 const struct cipso_v4_doi *doi_def,
1808 const struct netlbl_lsm_secattr *secattr)
1809{
1810 int ret_val;
1811 u32 iter;
1812
1813 if (buf_len <= CIPSO_V4_HDR_LEN)
1814 return -ENOSPC;
1815
1816
1817
1818
1819 iter = 0;
1820 do {
1821 memset(buf, 0, buf_len);
1822 switch (doi_def->tags[iter]) {
1823 case CIPSO_V4_TAG_RBITMAP:
1824 ret_val = cipso_v4_gentag_rbm(doi_def,
1825 secattr,
1826 &buf[CIPSO_V4_HDR_LEN],
1827 buf_len - CIPSO_V4_HDR_LEN);
1828 break;
1829 case CIPSO_V4_TAG_ENUM:
1830 ret_val = cipso_v4_gentag_enum(doi_def,
1831 secattr,
1832 &buf[CIPSO_V4_HDR_LEN],
1833 buf_len - CIPSO_V4_HDR_LEN);
1834 break;
1835 case CIPSO_V4_TAG_RANGE:
1836 ret_val = cipso_v4_gentag_rng(doi_def,
1837 secattr,
1838 &buf[CIPSO_V4_HDR_LEN],
1839 buf_len - CIPSO_V4_HDR_LEN);
1840 break;
1841 case CIPSO_V4_TAG_LOCAL:
1842 ret_val = cipso_v4_gentag_loc(doi_def,
1843 secattr,
1844 &buf[CIPSO_V4_HDR_LEN],
1845 buf_len - CIPSO_V4_HDR_LEN);
1846 break;
1847 default:
1848 return -EPERM;
1849 }
1850
1851 iter++;
1852 } while (ret_val < 0 &&
1853 iter < CIPSO_V4_TAG_MAXCNT &&
1854 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
1855 if (ret_val < 0)
1856 return ret_val;
1857 cipso_v4_gentag_hdr(doi_def, buf, ret_val);
1858 return CIPSO_V4_HDR_LEN + ret_val;
1859}
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
/**
 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
 * @sk: the socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes to encode
 *
 * Description:
 * Build a CIPSO option from @doi_def/@secattr and install it as the
 * socket's IP options, replacing (and freeing) any options already set.
 * NOTE(review): the plain read-modify of sk_inet->opt and the xchg()
 * swap assume the caller serializes access to the socket (i.e. holds
 * the socket lock) -- confirm against callers.  Returns zero on
 * success, negative values on failure.
 */
int cipso_v4_sock_setattr(struct sock *sk,
			  const struct cipso_v4_doi *doi_def,
			  const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options *opt = NULL;
	struct inet_sock *sk_inet;
	struct inet_connection_sock *sk_conn;

	/* a NULL socket is treated as a successful no-op so best-effort
	 * labeling paths do not fail outright */
	if (sk == NULL)
		return 0;

	/* build the option into a scratch buffer sized for the largest
	 * possible IPv4 option block */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto socket_setattr_failure;
	buf_len = ret_val;

	/* IP options must be padded to a multiple of four bytes;
	 * kzalloc() zero-fills, so the padding bytes come out as
	 * IPOPT_END (0x00) */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
	memcpy(opt->__data, buf, buf_len);
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	sk_inet = inet_sk(sk);
	if (sk_inet->is_icsk) {
		/* connection-oriented socket: adjust the extension header
		 * accounting for the old vs. new option size and re-sync
		 * the MSS to match */
		sk_conn = inet_csk(sk);
		if (sk_inet->opt)
			sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;
		sk_conn->icsk_ext_hdr_len += opt->optlen;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
	/* swap in the new options and free whatever was there before */
	opt = xchg(&sk_inet->opt, opt);
	kfree(opt);

	return 0;

socket_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
/**
 * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
 * @req: the connection request socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes to encode
 *
 * Description:
 * Build a CIPSO option from @doi_def/@secattr and install it as the
 * request socket's IP options, replacing (and freeing) any options
 * already set.  Same construction scheme as cipso_v4_sock_setattr().
 * Returns zero on success, negative values on failure.
 */
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options *opt = NULL;
	struct inet_request_sock *req_inet;

	/* build the option into a scratch buffer sized for the largest
	 * possible IPv4 option block */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (buf == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* IP options must be padded to a multiple of four bytes;
	 * kzalloc() zero-fills, so the padding bytes come out as
	 * IPOPT_END (0x00) */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->__data, buf, buf_len);
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	/* swap in the new options and free whatever was there before */
	req_inet = inet_rsk(req);
	opt = xchg(&req_inet->opt, opt);
	kfree(opt);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020int cipso_v4_delopt(struct ip_options **opt_ptr)
2021{
2022 int hdr_delta = 0;
2023 struct ip_options *opt = *opt_ptr;
2024
2025 if (opt->srr || opt->rr || opt->ts || opt->router_alert) {
2026 u8 cipso_len;
2027 u8 cipso_off;
2028 unsigned char *cipso_ptr;
2029 int iter;
2030 int optlen_new;
2031
2032 cipso_off = opt->cipso - sizeof(struct iphdr);
2033 cipso_ptr = &opt->__data[cipso_off];
2034 cipso_len = cipso_ptr[1];
2035
2036 if (opt->srr > opt->cipso)
2037 opt->srr -= cipso_len;
2038 if (opt->rr > opt->cipso)
2039 opt->rr -= cipso_len;
2040 if (opt->ts > opt->cipso)
2041 opt->ts -= cipso_len;
2042 if (opt->router_alert > opt->cipso)
2043 opt->router_alert -= cipso_len;
2044 opt->cipso = 0;
2045
2046 memmove(cipso_ptr, cipso_ptr + cipso_len,
2047 opt->optlen - cipso_off - cipso_len);
2048
2049
2050
2051
2052
2053
2054 iter = 0;
2055 optlen_new = 0;
2056 while (iter < opt->optlen)
2057 if (opt->__data[iter] != IPOPT_NOP) {
2058 iter += opt->__data[iter + 1];
2059 optlen_new = iter;
2060 } else
2061 iter++;
2062 hdr_delta = opt->optlen;
2063 opt->optlen = (optlen_new + 3) & ~3;
2064 hdr_delta -= opt->optlen;
2065 } else {
2066
2067
2068 *opt_ptr = NULL;
2069 hdr_delta = opt->optlen;
2070 kfree(opt);
2071 }
2072
2073 return hdr_delta;
2074}
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084void cipso_v4_sock_delattr(struct sock *sk)
2085{
2086 int hdr_delta;
2087 struct ip_options *opt;
2088 struct inet_sock *sk_inet;
2089
2090 sk_inet = inet_sk(sk);
2091 opt = sk_inet->opt;
2092 if (opt == NULL || opt->cipso == 0)
2093 return;
2094
2095 hdr_delta = cipso_v4_delopt(&sk_inet->opt);
2096 if (sk_inet->is_icsk && hdr_delta > 0) {
2097 struct inet_connection_sock *sk_conn = inet_csk(sk);
2098 sk_conn->icsk_ext_hdr_len -= hdr_delta;
2099 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
2100 }
2101}
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111void cipso_v4_req_delattr(struct request_sock *req)
2112{
2113 struct ip_options *opt;
2114 struct inet_request_sock *req_inet;
2115
2116 req_inet = inet_rsk(req);
2117 opt = req_inet->opt;
2118 if (opt == NULL || opt->cipso == 0)
2119 return;
2120
2121 cipso_v4_delopt(&req_inet->opt);
2122}
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134static int cipso_v4_getattr(const unsigned char *cipso,
2135 struct netlbl_lsm_secattr *secattr)
2136{
2137 int ret_val = -ENOMSG;
2138 u32 doi;
2139 struct cipso_v4_doi *doi_def;
2140
2141 if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
2142 return 0;
2143
2144 doi = get_unaligned_be32(&cipso[2]);
2145 rcu_read_lock();
2146 doi_def = cipso_v4_doi_search(doi);
2147 if (doi_def == NULL)
2148 goto getattr_return;
2149
2150
2151
2152 switch (cipso[6]) {
2153 case CIPSO_V4_TAG_RBITMAP:
2154 ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
2155 break;
2156 case CIPSO_V4_TAG_ENUM:
2157 ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
2158 break;
2159 case CIPSO_V4_TAG_RANGE:
2160 ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
2161 break;
2162 case CIPSO_V4_TAG_LOCAL:
2163 ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
2164 break;
2165 }
2166 if (ret_val == 0)
2167 secattr->type = NETLBL_NLTYPE_CIPSOV4;
2168
2169getattr_return:
2170 rcu_read_unlock();
2171 return ret_val;
2172}
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
2187{
2188 struct ip_options *opt;
2189
2190 opt = inet_sk(sk)->opt;
2191 if (opt == NULL || opt->cipso == 0)
2192 return -ENOMSG;
2193
2194 return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),
2195 secattr);
2196}
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes to encode
 *
 * Description:
 * Generate a CIPSO option from @doi_def/@secattr and write it into the
 * packet's IP header, growing or shrinking the header in place as
 * needed.  NOTE(review): any IP options already in the packet are
 * overwritten by the new block -- confirm callers only use this on
 * packets whose options may be replaced wholesale.  Returns zero on
 * success, negative values on failure.
 */
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
			    const struct cipso_v4_doi *doi_def,
			    const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
	u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
	u32 opt_len;
	int len_delta;

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		return ret_val;
	buf_len = ret_val;
	/* IP options are padded to a multiple of four bytes */
	opt_len = (buf_len + 3) & ~3;

	/* how much the option area (and thus the IP header) must grow
	 * (positive) or shrink (negative) versus what is there now */
	len_delta = opt_len - opt->optlen;

	/* make sure we own a private, writable copy of the packet with
	 * enough headroom for any growth */
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	if (len_delta > 0) {
		/* growing: push the frame start down and slide the IP
		 * header with it, opening a gap after the header for the
		 * larger option block */
		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		memmove((char *)iph - len_delta, iph, iph->ihl << 2);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
	} else if (len_delta < 0) {
		/* shrinking: blank the current option area with NOPs
		 * before the (smaller) new block is written over it */
		iph = ip_hdr(skb);
		memset(iph + 1, IPOPT_NOP, opt->optlen);
	} else
		iph = ip_hdr(skb);

	/* reset the control-block option state to just the CIPSO option */
	if (opt->optlen > 0)
		memset(opt, 0, sizeof(*opt));
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	opt->is_changed = 1;

	/* copy the new option block in right after the IP header and
	 * zero-fill (IPOPT_END) the alignment padding */
	memcpy(iph + 1, buf, buf_len);
	if (opt_len > buf_len)
		memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
	if (len_delta != 0) {
		/* the header size changed: recompute the header length
		 * (in 32-bit words) and the total packet length */
		iph->ihl = 5 + (opt_len >> 2);
		iph->tot_len = htons(skb->len);
	}
	ip_send_check(iph);

	return 0;
}
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286int cipso_v4_skbuff_delattr(struct sk_buff *skb)
2287{
2288 int ret_val;
2289 struct iphdr *iph;
2290 struct ip_options *opt = &IPCB(skb)->opt;
2291 unsigned char *cipso_ptr;
2292
2293 if (opt->cipso == 0)
2294 return 0;
2295
2296
2297 ret_val = skb_cow(skb, skb_headroom(skb));
2298 if (ret_val < 0)
2299 return ret_val;
2300
2301
2302
2303
2304
2305 iph = ip_hdr(skb);
2306 cipso_ptr = (unsigned char *)iph + opt->cipso;
2307 memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
2308 opt->cipso = 0;
2309 opt->is_changed = 1;
2310
2311 ip_send_check(iph);
2312
2313 return 0;
2314}
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
/**
 * cipso_v4_skbuff_getattr - Get the security attributes from a packet
 * @skb: the packet
 * @secattr: the security attributes to fill in
 *
 * Description:
 * Parse the packet's CIPSO option into @secattr.  Returns zero on
 * success, negative values on failure.
 */
int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
			    struct netlbl_lsm_secattr *secattr)
{
	const unsigned char *cipso_ptr = CIPSO_V4_OPTPTR(skb);

	return cipso_v4_getattr(cipso_ptr, secattr);
}
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344static int __init cipso_v4_init(void)
2345{
2346 int ret_val;
2347
2348 ret_val = cipso_v4_cache_init();
2349 if (ret_val != 0)
2350 panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
2351 ret_val);
2352
2353 return 0;
2354}
2355
2356subsys_initcall(cipso_v4_init);
2357