#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

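/* Find the pkey_index_qp_list entry tracking pp's pkey index on pp's
 * port, or NULL if no QP has been listed under that index yet.
 */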
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
        return pkey;
}

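/* Fetch the cached PKey value and subnet prefix for the port and pkey
 * index described by *pp.
 */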
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}

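/* Ask the LSM whether this QP, and every QP sharing its security
 * structure, may use the given PKey on the given subnet.
 */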
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *sec.
 *
 * It takes separate ports_pkeys and security structure
 * because in some cases the pps will be for a new settings
 * or the pps will be for the original settings which were
 * not applied yet.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}

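/* Re-check every QP listed under this PKey index after a cache
 * change.  QPs that now fail the LSM check are collected on a local
 * list and transitioned to the error state outside the spinlock.
 */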
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, i.e. after
         * a destroy has failed for example.
         */
        pp->state = IB_PORT_PKEY_VALID;
}

static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
                if (!qp_pps) {
                        new_pps->main.port_num = qp_attr->port_num;
                        new_pps->main.pkey_index = qp_attr->pkey_index;
                } else {
                        new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
                                                  qp_attr->port_num :
                                                  qp_pps->main.port_num;

                        new_pps->main.pkey_index =
                                        (qp_attr_mask & IB_QP_PKEY_INDEX) ?
                                         qp_attr->pkey_index :
                                         qp_pps->main.pkey_index;
                }
                new_pps->main.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->main.port_num = qp_pps->main.port_num;
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
                if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->main.state = IB_PORT_PKEY_VALID;
        }

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}

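/* Create a security structure for a shared QP handle and verify it
 * against the port/pkey settings of the real QP.  On success the new
 * handle is linked into the real QP's shared_qp_list.
 */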
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);

        if (ret)
                return ret;

        if (!qp->qp_sec)
                return 0;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}

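/* Unlink a shared QP handle from the real QP's shared_qp_list and
 * free its security structure.
 */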
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}

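/* Allocate and initialize the QP security structure, including the
 * LSM security blob.  Devices with no IB ports skip this entirely and
 * leave qp_sec NULL.
 */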
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        u8 i = rdma_start_port(dev);
        bool is_ib = false;
        int ret;

        while (i <= rdma_end_port(dev) && !is_ib)
                is_ib = rdma_protocol_ib(dev, i++);

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

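/* First stage of QP destruction: delist the port/pkey settings and
 * mark the security structure as destroying so a concurrent cache
 * update won't operate on the half-destroyed QP.
 */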
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}

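/* Undo ib_destroy_qp_security_begin after a failed destroy: wait out
 * any pending error-flow completions, relist the port/pkey settings,
 * and re-check them, sending the QP to error if access is now denied.
 */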
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these setting were listed already
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}

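/* Final stage of QP destruction: wait for any outstanding error-flow
 * completions, then free the security structure.
 */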
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete before
         * destroying the security structure.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}

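/* Called when the PKey cache for a port changes: re-validate every
 * QP registered against each PKey index on that port.
 */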
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}

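/* Free all pkey_index_qp_list entries for every port when the device
 * is being destroyed.
 */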
void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}

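/* Security-checking wrapper around the device's modify_qp operation.
 * If the port, pkey index or alternate path is changing, the new
 * settings are listed and verified before the hardware QP is
 * modified, and the superseded settings (or the new ones, on failure)
 * are delisted and freed afterwards.
 */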
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                  "%s: QP security is not initialized for IB QP: %d\n",
                  __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real
         * QP. Open handles on the real QP will be in the
         * shared_qp_list. When enforcing security on the real QP
         * all the shared QPs will be checked as well.
         */
        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }

                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);

                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);

                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->ops.modify_qp(real_qp,
                                                     qp_attr,
                                                     qp_attr_mask,
                                                     udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }
                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }
        return ret;
}

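/* Check a security context against the cached PKey value and subnet
 * prefix for the given pkey index.  Non-IB ports are always allowed.
 */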
static int ib_security_pkey_access(struct ib_device *dev,
                                   u8 port_num,
                                   u16 pkey_index,
                                   void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(
                ag->security, dev_name(&ag->device->dev), ag->port_num);

        return NOTIFY_OK;
}

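/* Allocate the LSM security blob for a MAD agent.  SMI agents
 * additionally need permission to manage the subnet, and register an
 * LSM notifier so smp_allowed is refreshed whenever the security
 * policy changes.
 */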
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                dev_name(&agent->device->dev),
                                                agent->port_num);
        if (ret)
                goto free_security;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                goto free_security;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;

free_security:
        /* Don't leak the security blob on the error paths; cleanup is
         * only called for agents whose setup succeeded.
         */
        security_ib_free_security(agent->security);
        return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        /* Unregister the notifier before freeing the security blob so
         * the callback cannot run against freed memory.
         */
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);
        security_ib_free_security(agent->security);
}

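/* Enforce MAD security for the given agent and pkey index: SMI QPs
 * require subnet-management permission (smp_allowed); all other QPs
 * are subject to an LSM PKey access check.
 */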
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI) {
                if (!map->agent.smp_allowed)
                        return -EACCES;
                return 0;
        }

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}