#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

static LIST_HEAD(mad_agent_list);

static DEFINE_SPINLOCK(mad_agent_list_lock);

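/* Look up the pkey_index_qp_list entry for pp's pkey index on pp's port.
 * Returns NULL if no QP has been listed under that pkey index yet.
 */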
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
	list_for_each_entry(tmp_pkey,
			    &dev->port_pkey_list[pp->port_num].pkey_list,
			    pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
	return pkey;
}

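/* Read the pkey value and the port's subnet prefix from the device cache
 * for the port and pkey index described by pp.
 */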
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}

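/* Ask the LSM whether the QP owning qp_sec, and every shared QP opened on
 * it, may use the given pkey on the given subnet.  Returns 0 on success or
 * the first error reported by security_ib_pkey_access().
 */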
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *sec.
 *
 * It takes separate ports_pkeys and security structure
 * parameters in case the PP structure being checked does
 * not belong to the security structure and as a result
 * one should hold the QP security mutex of the other QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}

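/* Called on a pkey cache change.  Re-checks every QP listed under this
 * pkey index, moves any QP that is no longer allowed to use the pkey
 * onto a local list, and then transitions those QPs to the error state.
 */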
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_pkey_list[port_num].list_lock);
		/* Check for the PKey entry again under the lock.  A racing
		 * thread may have created it in the meantime.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_pkey_list[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_pkey_list[port_num].pkey_list);
		}
		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* Mark the entry as valid but no longer listed so it can be
	 * re-inserted later, e.g. if destruction of its QP is aborted.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}

static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}

/* Build the ports/pkeys settings that will be in effect if the requested
 * QP attribute changes are applied, falling back to the current settings
 * for anything not being modified.  The caller must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
		if (!qp_pps) {
			new_pps->main.port_num = qp_attr->port_num;
			new_pps->main.pkey_index = qp_attr->pkey_index;
		} else {
			new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
						  qp_attr->port_num :
						  qp_pps->main.port_num;

			new_pps->main.pkey_index =
					(qp_attr_mask & IB_QP_PKEY_INDEX) ?
					 qp_attr->pkey_index :
					 qp_pps->main.pkey_index;
		}
		new_pps->main.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->main.port_num = qp_pps->main.port_num;
		new_pps->main.pkey_index = qp_pps->main.pkey_index;
		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->main.state = IB_PORT_PKEY_VALID;
	}

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}

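/* Create a security structure for a QP opened on an existing real QP,
 * verify it may use the real QP's current port/pkey settings, and link
 * it into the real QP's shared_qp_list.
 */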
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	u8 i = rdma_start_port(dev);
	bool is_ib = false;
	int ret;

	while (i <= rdma_end_port(dev) && !is_ib)
		is_ib = rdma_protocol_ib(dev, i++);

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

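/* QP security teardown is split into begin/abort/end so it can be
 * interlocked with the pkey cache-change error flow: _begin unlists the
 * QP and records how many error-flow completions to wait for, _end waits
 * for them and frees the structure, and _abort undoes _begin when the
 * destroy of the QP itself fails.
 */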
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the port pkey lists so a concurrent cache
	 * change can no longer queue it for the error flow.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already queued on a to_error_list the destroying
	 * flag makes qp_to_error skip it, since the QP is about to be
	 * destroyed and must not be modified.
	 */
	sec->destroying = true;

	/* Record how many error-flow completions must be waited for in
	 * ib_destroy_qp_security_abort/end.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this structure may
	 * already be on a to_error_list; wait for each pending error-flow
	 * pass to complete before touching it.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the port pkey lists given up in
	 * ib_destroy_qp_security_begin and re-check the pkey settings in
	 * case a cache change occurred while the destroy was in progress.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring, wait until the
	 * error-flow passes recorded in ib_destroy_qp_security_begin
	 * have finished with this structure before freeing it.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}

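/* Called from the cache update path when a port's pkey table or subnet
 * prefix may have changed; re-validates every QP listed on that port.
 */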
void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey,
			    &device->port_pkey_list[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

void ib_security_release_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_pkey_list[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
	}
}

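/* Security wrapper around the device's modify_qp operation: for IB QPs it
 * computes the port/pkey settings that would result from the modify, lists
 * them and checks them against the LSM before calling the driver, and only
 * commits the new settings if the modify succeeds.
 */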
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %d\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP. Open
	 * handles on the real QP will be in the shared_qp_list. When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */
	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}

		/* Add this QP to the lists for the new port and pkey
		 * settings before checking for permission, in case a
		 * concurrent cache update is occurring.  Walking the list
		 * on a cache change does not take the security mutex
		 * unless it is sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->modify_qp(real_qp,
						 qp_attr,
						 qp_attr_mask,
						 udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}

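/* Look up the pkey and subnet prefix for a pkey index on an IB port and
 * ask the LSM whether the given security context may use them.  Non-IB
 * ports are always allowed.
 */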
static int ib_security_pkey_access(struct ib_device *dev,
				   u8 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	if (ret)
		return ret;

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

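/* Called when the LSM policy changes: re-evaluate, for every registered
 * SMI MAD agent, whether it is still allowed to manage the subnet on its
 * end port, and update its smp_allowed flag accordingly.
 */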
void ib_mad_agent_security_change(void)
{
	struct ib_mad_agent *ag;

	spin_lock(&mad_agent_list_lock);
	list_for_each_entry(ag,
			    &mad_agent_list,
			    mad_agent_sec_list)
		WRITE_ONCE(ag->smp_allowed,
			   !security_ib_endport_manage_subnet(ag->security,
				dev_name(&ag->device->dev), ag->port_num));
	spin_unlock(&mad_agent_list_lock);
}

int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	INIT_LIST_HEAD(&agent->mad_agent_sec_list);

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	spin_lock(&mad_agent_list_lock);
	ret = security_ib_endport_manage_subnet(agent->security,
						dev_name(&agent->device->dev),
						agent->port_num);
	if (ret)
		goto free_security;

	WRITE_ONCE(agent->smp_allowed, true);
	list_add(&agent->mad_agent_sec_list, &mad_agent_list);
	spin_unlock(&mad_agent_list_lock);
	return 0;

free_security:
	spin_unlock(&mad_agent_list_lock);
	security_ib_free_security(agent->security);
	return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	if (agent->qp->qp_type == IB_QPT_SMI) {
		spin_lock(&mad_agent_list_lock);
		list_del(&agent->mad_agent_sec_list);
		spin_unlock(&mad_agent_list_lock);
	}

	security_ib_free_security(agent->security);
}

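/* Per-MAD security check: SMPs are gated by the agent's cached
 * smp_allowed flag, all other MADs by a pkey access check against the
 * agent's security context.
 */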
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!READ_ONCE(map->agent.smp_allowed))
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}