1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
40#include <linux/dma-mapping.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <rdma/ib_cache.h>
44
45#include "mad_priv.h"
46#include "mad_rmpp.h"
47#include "smi.h"
48#include "opa_smi.h"
49#include "agent.h"
50#include "core_priv.h"
51
52static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
53static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
54
55module_param_named(send_queue_size, mad_sendq_size, int, 0444);
56MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
57module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
58MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
59
60static struct list_head ib_mad_port_list;
61static u32 ib_mad_client_id = 0;
62
63
64static DEFINE_SPINLOCK(ib_mad_port_list_lock);
65
66
67static int method_in_use(struct ib_mad_mgmt_method_table **method,
68 struct ib_mad_reg_req *mad_reg_req);
69static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
70static struct ib_mad_agent_private *find_mad_agent(
71 struct ib_mad_port_private *port_priv,
72 const struct ib_mad_hdr *mad);
73static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
74 struct ib_mad_private *mad);
75static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
76static void timeout_sends(struct work_struct *work);
77static void local_completions(struct work_struct *work);
78static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
79 struct ib_mad_agent_private *agent_priv,
80 u8 mgmt_class);
81static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
82 struct ib_mad_agent_private *agent_priv);
83static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
84 struct ib_wc *wc);
85static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
86
87
88
89
90
91static inline struct ib_mad_port_private *
92__ib_get_mad_port(struct ib_device *device, int port_num)
93{
94 struct ib_mad_port_private *entry;
95
96 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
97 if (entry->device == device && entry->port_num == port_num)
98 return entry;
99 }
100 return NULL;
101}
102
103
104
105
106
107static inline struct ib_mad_port_private *
108ib_get_mad_port(struct ib_device *device, int port_num)
109{
110 struct ib_mad_port_private *entry;
111 unsigned long flags;
112
113 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
114 entry = __ib_get_mad_port(device, port_num);
115 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
116
117 return entry;
118}
119
120static inline u8 convert_mgmt_class(u8 mgmt_class)
121{
122
123 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
124 0 : mgmt_class;
125}
126
127static int get_spl_qp_index(enum ib_qp_type qp_type)
128{
129 switch (qp_type)
130 {
131 case IB_QPT_SMI:
132 return 0;
133 case IB_QPT_GSI:
134 return 1;
135 default:
136 return -1;
137 }
138}
139
140static int vendor_class_index(u8 mgmt_class)
141{
142 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
143}
144
145static int is_vendor_class(u8 mgmt_class)
146{
147 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
148 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
149 return 0;
150 return 1;
151}
152
153static int is_vendor_oui(char *oui)
154{
155 if (oui[0] || oui[1] || oui[2])
156 return 1;
157 return 0;
158}
159
160static int is_vendor_method_in_use(
161 struct ib_mad_mgmt_vendor_class *vendor_class,
162 struct ib_mad_reg_req *mad_reg_req)
163{
164 struct ib_mad_mgmt_method_table *method;
165 int i;
166
167 for (i = 0; i < MAX_MGMT_OUI; i++) {
168 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
169 method = vendor_class->method_table[i];
170 if (method) {
171 if (method_in_use(&method, mad_reg_req))
172 return 1;
173 else
174 break;
175 }
176 }
177 }
178 return 0;
179}
180
181int ib_response_mad(const struct ib_mad_hdr *hdr)
182{
183 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
184 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
185 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
186 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
187}
188EXPORT_SYMBOL(ib_response_mad);
189
190
191
192
193struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
194 u8 port_num,
195 enum ib_qp_type qp_type,
196 struct ib_mad_reg_req *mad_reg_req,
197 u8 rmpp_version,
198 ib_mad_send_handler send_handler,
199 ib_mad_recv_handler recv_handler,
200 void *context,
201 u32 registration_flags)
202{
203 struct ib_mad_port_private *port_priv;
204 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
205 struct ib_mad_agent_private *mad_agent_priv;
206 struct ib_mad_reg_req *reg_req = NULL;
207 struct ib_mad_mgmt_class_table *class;
208 struct ib_mad_mgmt_vendor_class_table *vendor;
209 struct ib_mad_mgmt_vendor_class *vendor_class;
210 struct ib_mad_mgmt_method_table *method;
211 int ret2, qpn;
212 unsigned long flags;
213 u8 mgmt_class, vclass;
214
215
216 qpn = get_spl_qp_index(qp_type);
217 if (qpn == -1) {
218 dev_notice(&device->dev,
219 "ib_register_mad_agent: invalid QP Type %d\n",
220 qp_type);
221 goto error1;
222 }
223
224 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
225 dev_notice(&device->dev,
226 "ib_register_mad_agent: invalid RMPP Version %u\n",
227 rmpp_version);
228 goto error1;
229 }
230
231
232 if (mad_reg_req) {
233 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
234 dev_notice(&device->dev,
235 "ib_register_mad_agent: invalid Class Version %u\n",
236 mad_reg_req->mgmt_class_version);
237 goto error1;
238 }
239 if (!recv_handler) {
240 dev_notice(&device->dev,
241 "ib_register_mad_agent: no recv_handler\n");
242 goto error1;
243 }
244 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
245
246
247
248
249 if (mad_reg_req->mgmt_class !=
250 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
251 dev_notice(&device->dev,
252 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
253 mad_reg_req->mgmt_class);
254 goto error1;
255 }
256 } else if (mad_reg_req->mgmt_class == 0) {
257
258
259
260
261 dev_notice(&device->dev,
262 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
263 goto error1;
264 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
265
266
267
268
269 if (!is_vendor_oui(mad_reg_req->oui)) {
270 dev_notice(&device->dev,
271 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
272 mad_reg_req->mgmt_class);
273 goto error1;
274 }
275 }
276
277 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
278 if (rmpp_version) {
279 dev_notice(&device->dev,
280 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
281 mad_reg_req->mgmt_class);
282 goto error1;
283 }
284 }
285
286
287 if (qp_type == IB_QPT_SMI) {
288 if ((mad_reg_req->mgmt_class !=
289 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
290 (mad_reg_req->mgmt_class !=
291 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
292 dev_notice(&device->dev,
293 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
294 mad_reg_req->mgmt_class);
295 goto error1;
296 }
297 } else {
298 if ((mad_reg_req->mgmt_class ==
299 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
300 (mad_reg_req->mgmt_class ==
301 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
302 dev_notice(&device->dev,
303 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
304 mad_reg_req->mgmt_class);
305 goto error1;
306 }
307 }
308 } else {
309
310 if (!send_handler)
311 goto error1;
312 if (registration_flags & IB_MAD_USER_RMPP)
313 goto error1;
314 }
315
316
317 port_priv = ib_get_mad_port(device, port_num);
318 if (!port_priv) {
319 dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
320 ret = ERR_PTR(-ENODEV);
321 goto error1;
322 }
323
324
325
326 if (!port_priv->qp_info[qpn].qp) {
327 dev_notice(&device->dev,
328 "ib_register_mad_agent: QP %d not supported\n", qpn);
329 ret = ERR_PTR(-EPROTONOSUPPORT);
330 goto error1;
331 }
332
333
334 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
335 if (!mad_agent_priv) {
336 ret = ERR_PTR(-ENOMEM);
337 goto error1;
338 }
339
340 if (mad_reg_req) {
341 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
342 if (!reg_req) {
343 ret = ERR_PTR(-ENOMEM);
344 goto error3;
345 }
346 }
347
348
349 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
350 mad_agent_priv->reg_req = reg_req;
351 mad_agent_priv->agent.rmpp_version = rmpp_version;
352 mad_agent_priv->agent.device = device;
353 mad_agent_priv->agent.recv_handler = recv_handler;
354 mad_agent_priv->agent.send_handler = send_handler;
355 mad_agent_priv->agent.context = context;
356 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
357 mad_agent_priv->agent.port_num = port_num;
358 mad_agent_priv->agent.flags = registration_flags;
359 spin_lock_init(&mad_agent_priv->lock);
360 INIT_LIST_HEAD(&mad_agent_priv->send_list);
361 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
362 INIT_LIST_HEAD(&mad_agent_priv->done_list);
363 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
364 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
365 INIT_LIST_HEAD(&mad_agent_priv->local_list);
366 INIT_WORK(&mad_agent_priv->local_work, local_completions);
367 atomic_set(&mad_agent_priv->refcount, 1);
368 init_completion(&mad_agent_priv->comp);
369
370 spin_lock_irqsave(&port_priv->reg_lock, flags);
371 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
372
373
374
375
376
377 if (mad_reg_req) {
378 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
379 if (!is_vendor_class(mgmt_class)) {
380 class = port_priv->version[mad_reg_req->
381 mgmt_class_version].class;
382 if (class) {
383 method = class->method_table[mgmt_class];
384 if (method) {
385 if (method_in_use(&method,
386 mad_reg_req))
387 goto error4;
388 }
389 }
390 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
391 mgmt_class);
392 } else {
393
394 vendor = port_priv->version[mad_reg_req->
395 mgmt_class_version].vendor;
396 if (vendor) {
397 vclass = vendor_class_index(mgmt_class);
398 vendor_class = vendor->vendor_class[vclass];
399 if (vendor_class) {
400 if (is_vendor_method_in_use(
401 vendor_class,
402 mad_reg_req))
403 goto error4;
404 }
405 }
406 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
407 }
408 if (ret2) {
409 ret = ERR_PTR(ret2);
410 goto error4;
411 }
412 }
413
414
415 list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
416 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
417
418 return &mad_agent_priv->agent;
419
420error4:
421 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
422 kfree(reg_req);
423error3:
424 kfree(mad_agent_priv);
425error1:
426 return ret;
427}
428EXPORT_SYMBOL(ib_register_mad_agent);
429
430static inline int is_snooping_sends(int mad_snoop_flags)
431{
432 return (mad_snoop_flags &
433 (
434
435 IB_MAD_SNOOP_SEND_COMPLETIONS
436));
437}
438
439static inline int is_snooping_recvs(int mad_snoop_flags)
440{
441 return (mad_snoop_flags &
442 (IB_MAD_SNOOP_RECVS
443));
444}
445
446static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
447 struct ib_mad_snoop_private *mad_snoop_priv)
448{
449 struct ib_mad_snoop_private **new_snoop_table;
450 unsigned long flags;
451 int i;
452
453 spin_lock_irqsave(&qp_info->snoop_lock, flags);
454
455 for (i = 0; i < qp_info->snoop_table_size; i++)
456 if (!qp_info->snoop_table[i])
457 break;
458
459 if (i == qp_info->snoop_table_size) {
460
461 new_snoop_table = krealloc(qp_info->snoop_table,
462 sizeof mad_snoop_priv *
463 (qp_info->snoop_table_size + 1),
464 GFP_ATOMIC);
465 if (!new_snoop_table) {
466 i = -ENOMEM;
467 goto out;
468 }
469
470 qp_info->snoop_table = new_snoop_table;
471 qp_info->snoop_table_size++;
472 }
473 qp_info->snoop_table[i] = mad_snoop_priv;
474 atomic_inc(&qp_info->snoop_count);
475out:
476 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
477 return i;
478}
479
480struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
481 u8 port_num,
482 enum ib_qp_type qp_type,
483 int mad_snoop_flags,
484 ib_mad_snoop_handler snoop_handler,
485 ib_mad_recv_handler recv_handler,
486 void *context)
487{
488 struct ib_mad_port_private *port_priv;
489 struct ib_mad_agent *ret;
490 struct ib_mad_snoop_private *mad_snoop_priv;
491 int qpn;
492
493
494 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
495 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
496 ret = ERR_PTR(-EINVAL);
497 goto error1;
498 }
499 qpn = get_spl_qp_index(qp_type);
500 if (qpn == -1) {
501 ret = ERR_PTR(-EINVAL);
502 goto error1;
503 }
504 port_priv = ib_get_mad_port(device, port_num);
505 if (!port_priv) {
506 ret = ERR_PTR(-ENODEV);
507 goto error1;
508 }
509
510 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
511 if (!mad_snoop_priv) {
512 ret = ERR_PTR(-ENOMEM);
513 goto error1;
514 }
515
516
517 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
518 mad_snoop_priv->agent.device = device;
519 mad_snoop_priv->agent.recv_handler = recv_handler;
520 mad_snoop_priv->agent.snoop_handler = snoop_handler;
521 mad_snoop_priv->agent.context = context;
522 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
523 mad_snoop_priv->agent.port_num = port_num;
524 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
525 init_completion(&mad_snoop_priv->comp);
526 mad_snoop_priv->snoop_index = register_snoop_agent(
527 &port_priv->qp_info[qpn],
528 mad_snoop_priv);
529 if (mad_snoop_priv->snoop_index < 0) {
530 ret = ERR_PTR(mad_snoop_priv->snoop_index);
531 goto error2;
532 }
533
534 atomic_set(&mad_snoop_priv->refcount, 1);
535 return &mad_snoop_priv->agent;
536
537error2:
538 kfree(mad_snoop_priv);
539error1:
540 return ret;
541}
542EXPORT_SYMBOL(ib_register_mad_snoop);
543
544static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
545{
546 if (atomic_dec_and_test(&mad_agent_priv->refcount))
547 complete(&mad_agent_priv->comp);
548}
549
550static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
551{
552 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
553 complete(&mad_snoop_priv->comp);
554}
555
556static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
557{
558 struct ib_mad_port_private *port_priv;
559 unsigned long flags;
560
561
562
563
564
565
566
567 cancel_mads(mad_agent_priv);
568 port_priv = mad_agent_priv->qp_info->port_priv;
569 cancel_delayed_work(&mad_agent_priv->timed_work);
570
571 spin_lock_irqsave(&port_priv->reg_lock, flags);
572 remove_mad_reg_req(mad_agent_priv);
573 list_del(&mad_agent_priv->agent_list);
574 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
575
576 flush_workqueue(port_priv->wq);
577 ib_cancel_rmpp_recvs(mad_agent_priv);
578
579 deref_mad_agent(mad_agent_priv);
580 wait_for_completion(&mad_agent_priv->comp);
581
582 kfree(mad_agent_priv->reg_req);
583 kfree(mad_agent_priv);
584}
585
586static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
587{
588 struct ib_mad_qp_info *qp_info;
589 unsigned long flags;
590
591 qp_info = mad_snoop_priv->qp_info;
592 spin_lock_irqsave(&qp_info->snoop_lock, flags);
593 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
594 atomic_dec(&qp_info->snoop_count);
595 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
596
597 deref_snoop_agent(mad_snoop_priv);
598 wait_for_completion(&mad_snoop_priv->comp);
599
600 kfree(mad_snoop_priv);
601}
602
603
604
605
606int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
607{
608 struct ib_mad_agent_private *mad_agent_priv;
609 struct ib_mad_snoop_private *mad_snoop_priv;
610
611
612 if (mad_agent->hi_tid) {
613 mad_agent_priv = container_of(mad_agent,
614 struct ib_mad_agent_private,
615 agent);
616 unregister_mad_agent(mad_agent_priv);
617 } else {
618 mad_snoop_priv = container_of(mad_agent,
619 struct ib_mad_snoop_private,
620 agent);
621 unregister_mad_snoop(mad_snoop_priv);
622 }
623 return 0;
624}
625EXPORT_SYMBOL(ib_unregister_mad_agent);
626
627static void dequeue_mad(struct ib_mad_list_head *mad_list)
628{
629 struct ib_mad_queue *mad_queue;
630 unsigned long flags;
631
632 BUG_ON(!mad_list->mad_queue);
633 mad_queue = mad_list->mad_queue;
634 spin_lock_irqsave(&mad_queue->lock, flags);
635 list_del(&mad_list->list);
636 mad_queue->count--;
637 spin_unlock_irqrestore(&mad_queue->lock, flags);
638}
639
640static void snoop_send(struct ib_mad_qp_info *qp_info,
641 struct ib_mad_send_buf *send_buf,
642 struct ib_mad_send_wc *mad_send_wc,
643 int mad_snoop_flags)
644{
645 struct ib_mad_snoop_private *mad_snoop_priv;
646 unsigned long flags;
647 int i;
648
649 spin_lock_irqsave(&qp_info->snoop_lock, flags);
650 for (i = 0; i < qp_info->snoop_table_size; i++) {
651 mad_snoop_priv = qp_info->snoop_table[i];
652 if (!mad_snoop_priv ||
653 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
654 continue;
655
656 atomic_inc(&mad_snoop_priv->refcount);
657 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
658 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
659 send_buf, mad_send_wc);
660 deref_snoop_agent(mad_snoop_priv);
661 spin_lock_irqsave(&qp_info->snoop_lock, flags);
662 }
663 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
664}
665
666static void snoop_recv(struct ib_mad_qp_info *qp_info,
667 struct ib_mad_recv_wc *mad_recv_wc,
668 int mad_snoop_flags)
669{
670 struct ib_mad_snoop_private *mad_snoop_priv;
671 unsigned long flags;
672 int i;
673
674 spin_lock_irqsave(&qp_info->snoop_lock, flags);
675 for (i = 0; i < qp_info->snoop_table_size; i++) {
676 mad_snoop_priv = qp_info->snoop_table[i];
677 if (!mad_snoop_priv ||
678 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
679 continue;
680
681 atomic_inc(&mad_snoop_priv->refcount);
682 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
683 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
684 mad_recv_wc);
685 deref_snoop_agent(mad_snoop_priv);
686 spin_lock_irqsave(&qp_info->snoop_lock, flags);
687 }
688 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
689}
690
691static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
692 u16 pkey_index, u8 port_num, struct ib_wc *wc)
693{
694 memset(wc, 0, sizeof *wc);
695 wc->wr_cqe = cqe;
696 wc->status = IB_WC_SUCCESS;
697 wc->opcode = IB_WC_RECV;
698 wc->pkey_index = pkey_index;
699 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
700 wc->src_qp = IB_QP0;
701 wc->qp = qp;
702 wc->slid = slid;
703 wc->sl = 0;
704 wc->dlid_path_bits = 0;
705 wc->port_num = port_num;
706}
707
708static size_t mad_priv_size(const struct ib_mad_private *mp)
709{
710 return sizeof(struct ib_mad_private) + mp->mad_size;
711}
712
713static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
714{
715 size_t size = sizeof(struct ib_mad_private) + mad_size;
716 struct ib_mad_private *ret = kzalloc(size, flags);
717
718 if (ret)
719 ret->mad_size = mad_size;
720
721 return ret;
722}
723
724static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
725{
726 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
727}
728
729static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
730{
731 return sizeof(struct ib_grh) + mp->mad_size;
732}
733
734
735
736
737
738
739static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
740 struct ib_mad_send_wr_private *mad_send_wr)
741{
742 int ret = 0;
743 struct ib_smp *smp = mad_send_wr->send_buf.mad;
744 struct opa_smp *opa_smp = (struct opa_smp *)smp;
745 unsigned long flags;
746 struct ib_mad_local_private *local;
747 struct ib_mad_private *mad_priv;
748 struct ib_mad_port_private *port_priv;
749 struct ib_mad_agent_private *recv_mad_agent = NULL;
750 struct ib_device *device = mad_agent_priv->agent.device;
751 u8 port_num;
752 struct ib_wc mad_wc;
753 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
754 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
755 u16 out_mad_pkey_index = 0;
756 u16 drslid;
757 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
758 mad_agent_priv->qp_info->port_priv->port_num);
759
760 if (rdma_cap_ib_switch(device) &&
761 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
762 port_num = send_wr->port_num;
763 else
764 port_num = mad_agent_priv->agent.port_num;
765
766
767
768
769
770
771
772 if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
773 u32 opa_drslid;
774
775 if ((opa_get_smp_direction(opa_smp)
776 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
777 OPA_LID_PERMISSIVE &&
778 opa_smi_handle_dr_smp_send(opa_smp,
779 rdma_cap_ib_switch(device),
780 port_num) == IB_SMI_DISCARD) {
781 ret = -EINVAL;
782 dev_err(&device->dev, "OPA Invalid directed route\n");
783 goto out;
784 }
785 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
786 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
787 opa_drslid & 0xffff0000) {
788 ret = -EINVAL;
789 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
790 opa_drslid);
791 goto out;
792 }
793 drslid = (u16)(opa_drslid & 0x0000ffff);
794
795
796 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
797 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
798 goto out;
799 } else {
800 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
801 IB_LID_PERMISSIVE &&
802 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
803 IB_SMI_DISCARD) {
804 ret = -EINVAL;
805 dev_err(&device->dev, "Invalid directed route\n");
806 goto out;
807 }
808 drslid = be16_to_cpu(smp->dr_slid);
809
810
811 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
812 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
813 goto out;
814 }
815
816 local = kmalloc(sizeof *local, GFP_ATOMIC);
817 if (!local) {
818 ret = -ENOMEM;
819 dev_err(&device->dev, "No memory for ib_mad_local_private\n");
820 goto out;
821 }
822 local->mad_priv = NULL;
823 local->recv_mad_agent = NULL;
824 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
825 if (!mad_priv) {
826 ret = -ENOMEM;
827 dev_err(&device->dev, "No memory for local response MAD\n");
828 kfree(local);
829 goto out;
830 }
831
832 build_smp_wc(mad_agent_priv->agent.qp,
833 send_wr->wr.wr_cqe, drslid,
834 send_wr->pkey_index,
835 send_wr->port_num, &mad_wc);
836
837 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
838 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
839 + mad_send_wr->send_buf.data_len
840 + sizeof(struct ib_grh);
841 }
842
843
844 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
845 (const struct ib_mad_hdr *)smp, mad_size,
846 (struct ib_mad_hdr *)mad_priv->mad,
847 &mad_size, &out_mad_pkey_index);
848 switch (ret)
849 {
850 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
851 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
852 mad_agent_priv->agent.recv_handler) {
853 local->mad_priv = mad_priv;
854 local->recv_mad_agent = mad_agent_priv;
855
856
857
858
859 atomic_inc(&mad_agent_priv->refcount);
860 } else
861 kfree(mad_priv);
862 break;
863 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
864 kfree(mad_priv);
865 break;
866 case IB_MAD_RESULT_SUCCESS:
867
868 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
869 mad_agent_priv->agent.port_num);
870 if (port_priv) {
871 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
872 recv_mad_agent = find_mad_agent(port_priv,
873 (const struct ib_mad_hdr *)mad_priv->mad);
874 }
875 if (!port_priv || !recv_mad_agent) {
876
877
878
879
880 kfree(mad_priv);
881 break;
882 }
883 local->mad_priv = mad_priv;
884 local->recv_mad_agent = recv_mad_agent;
885 break;
886 default:
887 kfree(mad_priv);
888 kfree(local);
889 ret = -EINVAL;
890 goto out;
891 }
892
893 local->mad_send_wr = mad_send_wr;
894 if (opa) {
895 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
896 local->return_wc_byte_len = mad_size;
897 }
898
899 atomic_inc(&mad_agent_priv->refcount);
900
901 spin_lock_irqsave(&mad_agent_priv->lock, flags);
902 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
903 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
904 queue_work(mad_agent_priv->qp_info->port_priv->wq,
905 &mad_agent_priv->local_work);
906 ret = 1;
907out:
908 return ret;
909}
910
911static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
912{
913 int seg_size, pad;
914
915 seg_size = mad_size - hdr_len;
916 if (data_len && seg_size) {
917 pad = seg_size - data_len % seg_size;
918 return pad == seg_size ? 0 : pad;
919 } else
920 return seg_size;
921}
922
923static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
924{
925 struct ib_rmpp_segment *s, *t;
926
927 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
928 list_del(&s->list);
929 kfree(s);
930 }
931}
932
933static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
934 size_t mad_size, gfp_t gfp_mask)
935{
936 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
937 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
938 struct ib_rmpp_segment *seg = NULL;
939 int left, seg_size, pad;
940
941 send_buf->seg_size = mad_size - send_buf->hdr_len;
942 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
943 seg_size = send_buf->seg_size;
944 pad = send_wr->pad;
945
946
947 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
948 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
949 if (!seg) {
950 dev_err(&send_buf->mad_agent->device->dev,
951 "alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
952 sizeof (*seg) + seg_size, gfp_mask);
953 free_send_rmpp_list(send_wr);
954 return -ENOMEM;
955 }
956 seg->num = ++send_buf->seg_count;
957 list_add_tail(&seg->list, &send_wr->rmpp_list);
958 }
959
960
961 if (pad)
962 memset(seg->data + seg_size - pad, 0, pad);
963
964 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
965 agent.rmpp_version;
966 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
967 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
968
969 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
970 struct ib_rmpp_segment, list);
971 send_wr->last_ack_seg = send_wr->cur_seg;
972 return 0;
973}
974
975int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
976{
977 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
978}
979EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
980
981struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
982 u32 remote_qpn, u16 pkey_index,
983 int rmpp_active,
984 int hdr_len, int data_len,
985 gfp_t gfp_mask,
986 u8 base_version)
987{
988 struct ib_mad_agent_private *mad_agent_priv;
989 struct ib_mad_send_wr_private *mad_send_wr;
990 int pad, message_size, ret, size;
991 void *buf;
992 size_t mad_size;
993 bool opa;
994
995 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
996 agent);
997
998 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
999
1000 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1001 mad_size = sizeof(struct opa_mad);
1002 else
1003 mad_size = sizeof(struct ib_mad);
1004
1005 pad = get_pad_size(hdr_len, data_len, mad_size);
1006 message_size = hdr_len + data_len + pad;
1007
1008 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1009 if (!rmpp_active && message_size > mad_size)
1010 return ERR_PTR(-EINVAL);
1011 } else
1012 if (rmpp_active || message_size > mad_size)
1013 return ERR_PTR(-EINVAL);
1014
1015 size = rmpp_active ? hdr_len : mad_size;
1016 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1017 if (!buf)
1018 return ERR_PTR(-ENOMEM);
1019
1020 mad_send_wr = buf + size;
1021 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1022 mad_send_wr->send_buf.mad = buf;
1023 mad_send_wr->send_buf.hdr_len = hdr_len;
1024 mad_send_wr->send_buf.data_len = data_len;
1025 mad_send_wr->pad = pad;
1026
1027 mad_send_wr->mad_agent_priv = mad_agent_priv;
1028 mad_send_wr->sg_list[0].length = hdr_len;
1029 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1030
1031
1032 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1033 data_len < mad_size - hdr_len)
1034 mad_send_wr->sg_list[1].length = data_len;
1035 else
1036 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1037
1038 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1039
1040 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1041
1042 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1043 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1044 mad_send_wr->send_wr.wr.num_sge = 2;
1045 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1046 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1047 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1048 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1049 mad_send_wr->send_wr.pkey_index = pkey_index;
1050
1051 if (rmpp_active) {
1052 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1053 if (ret) {
1054 kfree(buf);
1055 return ERR_PTR(ret);
1056 }
1057 }
1058
1059 mad_send_wr->send_buf.mad_agent = mad_agent;
1060 atomic_inc(&mad_agent_priv->refcount);
1061 return &mad_send_wr->send_buf;
1062}
1063EXPORT_SYMBOL(ib_create_send_mad);
1064
1065int ib_get_mad_data_offset(u8 mgmt_class)
1066{
1067 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1068 return IB_MGMT_SA_HDR;
1069 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1070 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1071 (mgmt_class == IB_MGMT_CLASS_BIS))
1072 return IB_MGMT_DEVICE_HDR;
1073 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1074 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1075 return IB_MGMT_VENDOR_HDR;
1076 else
1077 return IB_MGMT_MAD_HDR;
1078}
1079EXPORT_SYMBOL(ib_get_mad_data_offset);
1080
1081int ib_is_mad_class_rmpp(u8 mgmt_class)
1082{
1083 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1084 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1085 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1086 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1087 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1088 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1089 return 1;
1090 return 0;
1091}
1092EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1093
1094void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1095{
1096 struct ib_mad_send_wr_private *mad_send_wr;
1097 struct list_head *list;
1098
1099 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1100 send_buf);
1101 list = &mad_send_wr->cur_seg->list;
1102
1103 if (mad_send_wr->cur_seg->num < seg_num) {
1104 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1105 if (mad_send_wr->cur_seg->num == seg_num)
1106 break;
1107 } else if (mad_send_wr->cur_seg->num > seg_num) {
1108 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1109 if (mad_send_wr->cur_seg->num == seg_num)
1110 break;
1111 }
1112 return mad_send_wr->cur_seg->data;
1113}
1114EXPORT_SYMBOL(ib_get_rmpp_segment);
1115
1116static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1117{
1118 if (mad_send_wr->send_buf.seg_count)
1119 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1120 mad_send_wr->seg_num);
1121 else
1122 return mad_send_wr->send_buf.mad +
1123 mad_send_wr->send_buf.hdr_len;
1124}
1125
1126void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1127{
1128 struct ib_mad_agent_private *mad_agent_priv;
1129 struct ib_mad_send_wr_private *mad_send_wr;
1130
1131 mad_agent_priv = container_of(send_buf->mad_agent,
1132 struct ib_mad_agent_private, agent);
1133 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1134 send_buf);
1135
1136 free_send_rmpp_list(mad_send_wr);
1137 kfree(send_buf->mad);
1138 deref_mad_agent(mad_agent_priv);
1139}
1140EXPORT_SYMBOL(ib_free_send_mad);
1141
1142int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1143{
1144 struct ib_mad_qp_info *qp_info;
1145 struct list_head *list;
1146 struct ib_send_wr *bad_send_wr;
1147 struct ib_mad_agent *mad_agent;
1148 struct ib_sge *sge;
1149 unsigned long flags;
1150 int ret;
1151
1152
1153 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1154 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1155 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1156 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1157
1158 mad_agent = mad_send_wr->send_buf.mad_agent;
1159 sge = mad_send_wr->sg_list;
1160 sge[0].addr = ib_dma_map_single(mad_agent->device,
1161 mad_send_wr->send_buf.mad,
1162 sge[0].length,
1163 DMA_TO_DEVICE);
1164 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1165 return -ENOMEM;
1166
1167 mad_send_wr->header_mapping = sge[0].addr;
1168
1169 sge[1].addr = ib_dma_map_single(mad_agent->device,
1170 ib_get_payload(mad_send_wr),
1171 sge[1].length,
1172 DMA_TO_DEVICE);
1173 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1174 ib_dma_unmap_single(mad_agent->device,
1175 mad_send_wr->header_mapping,
1176 sge[0].length, DMA_TO_DEVICE);
1177 return -ENOMEM;
1178 }
1179 mad_send_wr->payload_mapping = sge[1].addr;
1180
1181 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1182 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1183 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1184 &bad_send_wr);
1185 list = &qp_info->send_queue.list;
1186 } else {
1187 ret = 0;
1188 list = &qp_info->overflow_list;
1189 }
1190
1191 if (!ret) {
1192 qp_info->send_queue.count++;
1193 list_add_tail(&mad_send_wr->mad_list.list, list);
1194 }
1195 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1196 if (ret) {
1197 ib_dma_unmap_single(mad_agent->device,
1198 mad_send_wr->header_mapping,
1199 sge[0].length, DMA_TO_DEVICE);
1200 ib_dma_unmap_single(mad_agent->device,
1201 mad_send_wr->payload_mapping,
1202 sge[1].length, DMA_TO_DEVICE);
1203 }
1204 return ret;
1205}
1206
1207
1208
1209
1210
1211int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1212 struct ib_mad_send_buf **bad_send_buf)
1213{
1214 struct ib_mad_agent_private *mad_agent_priv;
1215 struct ib_mad_send_buf *next_send_buf;
1216 struct ib_mad_send_wr_private *mad_send_wr;
1217 unsigned long flags;
1218 int ret = -EINVAL;
1219
1220
1221 for (; send_buf; send_buf = next_send_buf) {
1222
1223 mad_send_wr = container_of(send_buf,
1224 struct ib_mad_send_wr_private,
1225 send_buf);
1226 mad_agent_priv = mad_send_wr->mad_agent_priv;
1227
1228 if (!send_buf->mad_agent->send_handler ||
1229 (send_buf->timeout_ms &&
1230 !send_buf->mad_agent->recv_handler)) {
1231 ret = -EINVAL;
1232 goto error;
1233 }
1234
1235 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1236 if (mad_agent_priv->agent.rmpp_version) {
1237 ret = -EINVAL;
1238 goto error;
1239 }
1240 }
1241
1242
1243
1244
1245
1246
1247 next_send_buf = send_buf->next;
1248 mad_send_wr->send_wr.ah = send_buf->ah;
1249
1250 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1251 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1252 ret = handle_outgoing_dr_smp(mad_agent_priv,
1253 mad_send_wr);
1254 if (ret < 0)
1255 goto error;
1256 else if (ret == 1)
1257 continue;
1258 }
1259
1260 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1261
1262 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1263 mad_send_wr->max_retries = send_buf->retries;
1264 mad_send_wr->retries_left = send_buf->retries;
1265 send_buf->retries = 0;
1266
1267 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1268 mad_send_wr->status = IB_WC_SUCCESS;
1269
1270
1271 atomic_inc(&mad_agent_priv->refcount);
1272 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1273 list_add_tail(&mad_send_wr->agent_list,
1274 &mad_agent_priv->send_list);
1275 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1276
1277 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1278 ret = ib_send_rmpp_mad(mad_send_wr);
1279 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1280 ret = ib_send_mad(mad_send_wr);
1281 } else
1282 ret = ib_send_mad(mad_send_wr);
1283 if (ret < 0) {
1284
1285 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1286 list_del(&mad_send_wr->agent_list);
1287 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1288 atomic_dec(&mad_agent_priv->refcount);
1289 goto error;
1290 }
1291 }
1292 return 0;
1293error:
1294 if (bad_send_buf)
1295 *bad_send_buf = send_buf;
1296 return ret;
1297}
1298EXPORT_SYMBOL(ib_post_send_mad);
1299
1300
1301
1302
1303
1304void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1305{
1306 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1307 struct ib_mad_private_header *mad_priv_hdr;
1308 struct ib_mad_private *priv;
1309 struct list_head free_list;
1310
1311 INIT_LIST_HEAD(&free_list);
1312 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1313
1314 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1315 &free_list, list) {
1316 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1317 recv_buf);
1318 mad_priv_hdr = container_of(mad_recv_wc,
1319 struct ib_mad_private_header,
1320 recv_wc);
1321 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1322 header);
1323 kfree(priv);
1324 }
1325}
1326EXPORT_SYMBOL(ib_free_recv_mad);
1327
1328struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1329 u8 rmpp_version,
1330 ib_mad_send_handler send_handler,
1331 ib_mad_recv_handler recv_handler,
1332 void *context)
1333{
1334 return ERR_PTR(-EINVAL);
1335}
1336EXPORT_SYMBOL(ib_redirect_mad_qp);
1337
1338int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1339 struct ib_wc *wc)
1340{
1341 dev_err(&mad_agent->device->dev,
1342 "ib_process_mad_wc() not implemented yet\n");
1343 return 0;
1344}
1345EXPORT_SYMBOL(ib_process_mad_wc);
1346
1347static int method_in_use(struct ib_mad_mgmt_method_table **method,
1348 struct ib_mad_reg_req *mad_reg_req)
1349{
1350 int i;
1351
1352 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1353 if ((*method)->agent[i]) {
1354 pr_err("Method %d already in use\n", i);
1355 return -EINVAL;
1356 }
1357 }
1358 return 0;
1359}
1360
1361static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1362{
1363
1364 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1365 if (!*method) {
1366 pr_err("No memory for ib_mad_mgmt_method_table\n");
1367 return -ENOMEM;
1368 }
1369
1370 return 0;
1371}
1372
1373
1374
1375
1376static int check_method_table(struct ib_mad_mgmt_method_table *method)
1377{
1378 int i;
1379
1380 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1381 if (method->agent[i])
1382 return 1;
1383 return 0;
1384}
1385
1386
1387
1388
1389static int check_class_table(struct ib_mad_mgmt_class_table *class)
1390{
1391 int i;
1392
1393 for (i = 0; i < MAX_MGMT_CLASS; i++)
1394 if (class->method_table[i])
1395 return 1;
1396 return 0;
1397}
1398
1399static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1400{
1401 int i;
1402
1403 for (i = 0; i < MAX_MGMT_OUI; i++)
1404 if (vendor_class->method_table[i])
1405 return 1;
1406 return 0;
1407}
1408
1409static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1410 const char *oui)
1411{
1412 int i;
1413
1414 for (i = 0; i < MAX_MGMT_OUI; i++)
1415
1416 if (!memcmp(vendor_class->oui[i], oui, 3))
1417 return i;
1418
1419 return -1;
1420}
1421
1422static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1423{
1424 int i;
1425
1426 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1427 if (vendor->vendor_class[i])
1428 return 1;
1429
1430 return 0;
1431}
1432
1433static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1434 struct ib_mad_agent_private *agent)
1435{
1436 int i;
1437
1438
1439 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1440 if (method->agent[i] == agent) {
1441 method->agent[i] = NULL;
1442 }
1443 }
1444}
1445
1446static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1447 struct ib_mad_agent_private *agent_priv,
1448 u8 mgmt_class)
1449{
1450 struct ib_mad_port_private *port_priv;
1451 struct ib_mad_mgmt_class_table **class;
1452 struct ib_mad_mgmt_method_table **method;
1453 int i, ret;
1454
1455 port_priv = agent_priv->qp_info->port_priv;
1456 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1457 if (!*class) {
1458
1459 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1460 if (!*class) {
1461 dev_err(&agent_priv->agent.device->dev,
1462 "No memory for ib_mad_mgmt_class_table\n");
1463 ret = -ENOMEM;
1464 goto error1;
1465 }
1466
1467
1468 method = &(*class)->method_table[mgmt_class];
1469 if ((ret = allocate_method_table(method)))
1470 goto error2;
1471 } else {
1472 method = &(*class)->method_table[mgmt_class];
1473 if (!*method) {
1474
1475 if ((ret = allocate_method_table(method)))
1476 goto error1;
1477 }
1478 }
1479
1480
1481 if (method_in_use(method, mad_reg_req))
1482 goto error3;
1483
1484
1485 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1486 (*method)->agent[i] = agent_priv;
1487
1488 return 0;
1489
1490error3:
1491
1492 remove_methods_mad_agent(*method, agent_priv);
1493
1494 if (!check_method_table(*method)) {
1495
1496 kfree(*method);
1497 *method = NULL;
1498 }
1499 ret = -EINVAL;
1500 goto error1;
1501error2:
1502 kfree(*class);
1503 *class = NULL;
1504error1:
1505 return ret;
1506}
1507
1508static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1509 struct ib_mad_agent_private *agent_priv)
1510{
1511 struct ib_mad_port_private *port_priv;
1512 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1513 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1514 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1515 struct ib_mad_mgmt_method_table **method;
1516 int i, ret = -ENOMEM;
1517 u8 vclass;
1518
1519
1520 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1521 port_priv = agent_priv->qp_info->port_priv;
1522 vendor_table = &port_priv->version[
1523 mad_reg_req->mgmt_class_version].vendor;
1524 if (!*vendor_table) {
1525
1526 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1527 if (!vendor) {
1528 dev_err(&agent_priv->agent.device->dev,
1529 "No memory for ib_mad_mgmt_vendor_class_table\n");
1530 goto error1;
1531 }
1532
1533 *vendor_table = vendor;
1534 }
1535 if (!(*vendor_table)->vendor_class[vclass]) {
1536
1537 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1538 if (!vendor_class) {
1539 dev_err(&agent_priv->agent.device->dev,
1540 "No memory for ib_mad_mgmt_vendor_class\n");
1541 goto error2;
1542 }
1543
1544 (*vendor_table)->vendor_class[vclass] = vendor_class;
1545 }
1546 for (i = 0; i < MAX_MGMT_OUI; i++) {
1547
1548 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1549 mad_reg_req->oui, 3)) {
1550 method = &(*vendor_table)->vendor_class[
1551 vclass]->method_table[i];
1552 BUG_ON(!*method);
1553 goto check_in_use;
1554 }
1555 }
1556 for (i = 0; i < MAX_MGMT_OUI; i++) {
1557
1558 if (!is_vendor_oui((*vendor_table)->vendor_class[
1559 vclass]->oui[i])) {
1560 method = &(*vendor_table)->vendor_class[
1561 vclass]->method_table[i];
1562 BUG_ON(*method);
1563
1564 if ((ret = allocate_method_table(method)))
1565 goto error3;
1566 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1567 mad_reg_req->oui, 3);
1568 goto check_in_use;
1569 }
1570 }
1571 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1572 goto error3;
1573
1574check_in_use:
1575
1576 if (method_in_use(method, mad_reg_req))
1577 goto error4;
1578
1579
1580 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1581 (*method)->agent[i] = agent_priv;
1582
1583 return 0;
1584
1585error4:
1586
1587 remove_methods_mad_agent(*method, agent_priv);
1588
1589 if (!check_method_table(*method)) {
1590
1591 kfree(*method);
1592 *method = NULL;
1593 }
1594 ret = -EINVAL;
1595error3:
1596 if (vendor_class) {
1597 (*vendor_table)->vendor_class[vclass] = NULL;
1598 kfree(vendor_class);
1599 }
1600error2:
1601 if (vendor) {
1602 *vendor_table = NULL;
1603 kfree(vendor);
1604 }
1605error1:
1606 return ret;
1607}
1608
1609static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1610{
1611 struct ib_mad_port_private *port_priv;
1612 struct ib_mad_mgmt_class_table *class;
1613 struct ib_mad_mgmt_method_table *method;
1614 struct ib_mad_mgmt_vendor_class_table *vendor;
1615 struct ib_mad_mgmt_vendor_class *vendor_class;
1616 int index;
1617 u8 mgmt_class;
1618
1619
1620
1621
1622
1623 if (!agent_priv->reg_req) {
1624 goto out;
1625 }
1626
1627 port_priv = agent_priv->qp_info->port_priv;
1628 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1629 class = port_priv->version[
1630 agent_priv->reg_req->mgmt_class_version].class;
1631 if (!class)
1632 goto vendor_check;
1633
1634 method = class->method_table[mgmt_class];
1635 if (method) {
1636
1637 remove_methods_mad_agent(method, agent_priv);
1638
1639 if (!check_method_table(method)) {
1640
1641 kfree(method);
1642 class->method_table[mgmt_class] = NULL;
1643
1644 if (!check_class_table(class)) {
1645
1646 kfree(class);
1647 port_priv->version[
1648 agent_priv->reg_req->
1649 mgmt_class_version].class = NULL;
1650 }
1651 }
1652 }
1653
1654vendor_check:
1655 if (!is_vendor_class(mgmt_class))
1656 goto out;
1657
1658
1659 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1660 vendor = port_priv->version[
1661 agent_priv->reg_req->mgmt_class_version].vendor;
1662
1663 if (!vendor)
1664 goto out;
1665
1666 vendor_class = vendor->vendor_class[mgmt_class];
1667 if (vendor_class) {
1668 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1669 if (index < 0)
1670 goto out;
1671 method = vendor_class->method_table[index];
1672 if (method) {
1673
1674 remove_methods_mad_agent(method, agent_priv);
1675
1676
1677
1678
1679 if (!check_method_table(method)) {
1680
1681 kfree(method);
1682 vendor_class->method_table[index] = NULL;
1683 memset(vendor_class->oui[index], 0, 3);
1684
1685 if (!check_vendor_class(vendor_class)) {
1686
1687 kfree(vendor_class);
1688 vendor->vendor_class[mgmt_class] = NULL;
1689
1690 if (!check_vendor_table(vendor)) {
1691 kfree(vendor);
1692 port_priv->version[
1693 agent_priv->reg_req->
1694 mgmt_class_version].
1695 vendor = NULL;
1696 }
1697 }
1698 }
1699 }
1700 }
1701
1702out:
1703 return;
1704}
1705
1706static struct ib_mad_agent_private *
1707find_mad_agent(struct ib_mad_port_private *port_priv,
1708 const struct ib_mad_hdr *mad_hdr)
1709{
1710 struct ib_mad_agent_private *mad_agent = NULL;
1711 unsigned long flags;
1712
1713 spin_lock_irqsave(&port_priv->reg_lock, flags);
1714 if (ib_response_mad(mad_hdr)) {
1715 u32 hi_tid;
1716 struct ib_mad_agent_private *entry;
1717
1718
1719
1720
1721
1722 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1723 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1724 if (entry->agent.hi_tid == hi_tid) {
1725 mad_agent = entry;
1726 break;
1727 }
1728 }
1729 } else {
1730 struct ib_mad_mgmt_class_table *class;
1731 struct ib_mad_mgmt_method_table *method;
1732 struct ib_mad_mgmt_vendor_class_table *vendor;
1733 struct ib_mad_mgmt_vendor_class *vendor_class;
1734 const struct ib_vendor_mad *vendor_mad;
1735 int index;
1736
1737
1738
1739
1740
1741 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1742 goto out;
1743 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1744 class = port_priv->version[
1745 mad_hdr->class_version].class;
1746 if (!class)
1747 goto out;
1748 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1749 IB_MGMT_MAX_METHODS)
1750 goto out;
1751 method = class->method_table[convert_mgmt_class(
1752 mad_hdr->mgmt_class)];
1753 if (method)
1754 mad_agent = method->agent[mad_hdr->method &
1755 ~IB_MGMT_METHOD_RESP];
1756 } else {
1757 vendor = port_priv->version[
1758 mad_hdr->class_version].vendor;
1759 if (!vendor)
1760 goto out;
1761 vendor_class = vendor->vendor_class[vendor_class_index(
1762 mad_hdr->mgmt_class)];
1763 if (!vendor_class)
1764 goto out;
1765
1766 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1767 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1768 if (index == -1)
1769 goto out;
1770 method = vendor_class->method_table[index];
1771 if (method) {
1772 mad_agent = method->agent[mad_hdr->method &
1773 ~IB_MGMT_METHOD_RESP];
1774 }
1775 }
1776 }
1777
1778 if (mad_agent) {
1779 if (mad_agent->agent.recv_handler)
1780 atomic_inc(&mad_agent->refcount);
1781 else {
1782 dev_notice(&port_priv->device->dev,
1783 "No receive handler for client %p on port %d\n",
1784 &mad_agent->agent, port_priv->port_num);
1785 mad_agent = NULL;
1786 }
1787 }
1788out:
1789 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1790
1791 return mad_agent;
1792}
1793
1794static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1795 const struct ib_mad_qp_info *qp_info,
1796 bool opa)
1797{
1798 int valid = 0;
1799 u32 qp_num = qp_info->qp->qp_num;
1800
1801
1802 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1803 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1804 pr_err("MAD received with unsupported base version %d %s\n",
1805 mad_hdr->base_version, opa ? "(opa)" : "");
1806 goto out;
1807 }
1808
1809
1810 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1811 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1812 if (qp_num == 0)
1813 valid = 1;
1814 } else {
1815
1816 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1817 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1818 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1819 goto out;
1820
1821 if (qp_num != 0)
1822 valid = 1;
1823 }
1824
1825out:
1826 return valid;
1827}
1828
1829static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1830 const struct ib_mad_hdr *mad_hdr)
1831{
1832 struct ib_rmpp_mad *rmpp_mad;
1833
1834 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1835 return !mad_agent_priv->agent.rmpp_version ||
1836 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1837 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1838 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1839 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1840}
1841
1842static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1843 const struct ib_mad_recv_wc *rwc)
1844{
1845 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1846 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1847}
1848
1849static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1850 const struct ib_mad_send_wr_private *wr,
1851 const struct ib_mad_recv_wc *rwc )
1852{
1853 struct ib_ah_attr attr;
1854 u8 send_resp, rcv_resp;
1855 union ib_gid sgid;
1856 struct ib_device *device = mad_agent_priv->agent.device;
1857 u8 port_num = mad_agent_priv->agent.port_num;
1858 u8 lmc;
1859
1860 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1861 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1862
1863 if (send_resp == rcv_resp)
1864
1865 return 0;
1866
1867 if (ib_query_ah(wr->send_buf.ah, &attr))
1868
1869 return 0;
1870
1871 if (!!(attr.ah_flags & IB_AH_GRH) !=
1872 !!(rwc->wc->wc_flags & IB_WC_GRH))
1873
1874 return 0;
1875
1876 if (!send_resp && rcv_resp) {
1877
1878 if (!(attr.ah_flags & IB_AH_GRH)) {
1879 if (ib_get_cached_lmc(device, port_num, &lmc))
1880 return 0;
1881 return (!lmc || !((attr.src_path_bits ^
1882 rwc->wc->dlid_path_bits) &
1883 ((1 << lmc) - 1)));
1884 } else {
1885 if (ib_get_cached_gid(device, port_num,
1886 attr.grh.sgid_index, &sgid, NULL))
1887 return 0;
1888 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1889 16);
1890 }
1891 }
1892
1893 if (!(attr.ah_flags & IB_AH_GRH))
1894 return attr.dlid == rwc->wc->slid;
1895 else
1896 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1897 16);
1898}
1899
1900static inline int is_direct(u8 class)
1901{
1902 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1903}
1904
1905struct ib_mad_send_wr_private*
1906ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1907 const struct ib_mad_recv_wc *wc)
1908{
1909 struct ib_mad_send_wr_private *wr;
1910 const struct ib_mad_hdr *mad_hdr;
1911
1912 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1913
1914 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1915 if ((wr->tid == mad_hdr->tid) &&
1916 rcv_has_same_class(wr, wc) &&
1917
1918
1919
1920
1921 (is_direct(mad_hdr->mgmt_class) ||
1922 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1923 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1924 }
1925
1926
1927
1928
1929
1930 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1931 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1932 wr->tid == mad_hdr->tid &&
1933 wr->timeout &&
1934 rcv_has_same_class(wr, wc) &&
1935
1936
1937
1938
1939 (is_direct(mad_hdr->mgmt_class) ||
1940 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1941
1942 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1943 }
1944 return NULL;
1945}
1946
1947void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1948{
1949 mad_send_wr->timeout = 0;
1950 if (mad_send_wr->refcount == 1)
1951 list_move_tail(&mad_send_wr->agent_list,
1952 &mad_send_wr->mad_agent_priv->done_list);
1953}
1954
1955static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1956 struct ib_mad_recv_wc *mad_recv_wc)
1957{
1958 struct ib_mad_send_wr_private *mad_send_wr;
1959 struct ib_mad_send_wc mad_send_wc;
1960 unsigned long flags;
1961
1962 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1963 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1964 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1965 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1966 mad_recv_wc);
1967 if (!mad_recv_wc) {
1968 deref_mad_agent(mad_agent_priv);
1969 return;
1970 }
1971 }
1972
1973
1974 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1975 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1976 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1977 if (!mad_send_wr) {
1978 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1979 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1980 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1981 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1982 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1983
1984
1985
1986 mad_agent_priv->agent.recv_handler(
1987 &mad_agent_priv->agent, NULL,
1988 mad_recv_wc);
1989 atomic_dec(&mad_agent_priv->refcount);
1990 } else {
1991
1992
1993 ib_free_recv_mad(mad_recv_wc);
1994 deref_mad_agent(mad_agent_priv);
1995 return;
1996 }
1997 } else {
1998 ib_mark_mad_done(mad_send_wr);
1999 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2000
2001
2002 mad_agent_priv->agent.recv_handler(
2003 &mad_agent_priv->agent,
2004 &mad_send_wr->send_buf,
2005 mad_recv_wc);
2006 atomic_dec(&mad_agent_priv->refcount);
2007
2008 mad_send_wc.status = IB_WC_SUCCESS;
2009 mad_send_wc.vendor_err = 0;
2010 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2011 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2012 }
2013 } else {
2014 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2015 mad_recv_wc);
2016 deref_mad_agent(mad_agent_priv);
2017 }
2018}
2019
2020static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2021 const struct ib_mad_qp_info *qp_info,
2022 const struct ib_wc *wc,
2023 int port_num,
2024 struct ib_mad_private *recv,
2025 struct ib_mad_private *response)
2026{
2027 enum smi_forward_action retsmi;
2028 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2029
2030 if (smi_handle_dr_smp_recv(smp,
2031 rdma_cap_ib_switch(port_priv->device),
2032 port_num,
2033 port_priv->device->phys_port_cnt) ==
2034 IB_SMI_DISCARD)
2035 return IB_SMI_DISCARD;
2036
2037 retsmi = smi_check_forward_dr_smp(smp);
2038 if (retsmi == IB_SMI_LOCAL)
2039 return IB_SMI_HANDLE;
2040
2041 if (retsmi == IB_SMI_SEND) {
2042 if (smi_handle_dr_smp_send(smp,
2043 rdma_cap_ib_switch(port_priv->device),
2044 port_num) == IB_SMI_DISCARD)
2045 return IB_SMI_DISCARD;
2046
2047 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2048 return IB_SMI_DISCARD;
2049 } else if (rdma_cap_ib_switch(port_priv->device)) {
2050
2051 memcpy(response, recv, mad_priv_size(response));
2052 response->header.recv_wc.wc = &response->header.wc;
2053 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2054 response->header.recv_wc.recv_buf.grh = &response->grh;
2055
2056 agent_send_response((const struct ib_mad_hdr *)response->mad,
2057 &response->grh, wc,
2058 port_priv->device,
2059 smi_get_fwd_port(smp),
2060 qp_info->qp->qp_num,
2061 response->mad_size,
2062 false);
2063
2064 return IB_SMI_DISCARD;
2065 }
2066 return IB_SMI_HANDLE;
2067}
2068
2069static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2070 struct ib_mad_private *response,
2071 size_t *resp_len, bool opa)
2072{
2073 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2074 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2075
2076 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2077 recv_hdr->method == IB_MGMT_METHOD_SET) {
2078 memcpy(response, recv, mad_priv_size(response));
2079 response->header.recv_wc.wc = &response->header.wc;
2080 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2081 response->header.recv_wc.recv_buf.grh = &response->grh;
2082 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2083 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2084 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2085 resp_hdr->status |= IB_SMP_DIRECTION;
2086
2087 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2088 if (recv_hdr->mgmt_class ==
2089 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2090 recv_hdr->mgmt_class ==
2091 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2092 *resp_len = opa_get_smp_header_size(
2093 (struct opa_smp *)recv->mad);
2094 else
2095 *resp_len = sizeof(struct ib_mad_hdr);
2096 }
2097
2098 return true;
2099 } else {
2100 return false;
2101 }
2102}
2103
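/*
 * OPA counterpart of handle_ib_smi(): process or forward a received OPA
 * directed-route SMP.
 */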
2104static enum smi_action
2105handle_opa_smi(struct ib_mad_port_private *port_priv,
2106 struct ib_mad_qp_info *qp_info,
2107 struct ib_wc *wc,
2108 int port_num,
2109 struct ib_mad_private *recv,
2110 struct ib_mad_private *response)
2111{
2112 enum smi_forward_action retsmi;
2113 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2114
2115 if (opa_smi_handle_dr_smp_recv(smp,
2116 rdma_cap_ib_switch(port_priv->device),
2117 port_num,
2118 port_priv->device->phys_port_cnt) ==
2119 IB_SMI_DISCARD)
2120 return IB_SMI_DISCARD;
2121
2122 retsmi = opa_smi_check_forward_dr_smp(smp);
2123 if (retsmi == IB_SMI_LOCAL)
2124 return IB_SMI_HANDLE;
2125
2126 if (retsmi == IB_SMI_SEND) {
2127 if (opa_smi_handle_dr_smp_send(smp,
2128 rdma_cap_ib_switch(port_priv->device),
2129 port_num) == IB_SMI_DISCARD)
2130 return IB_SMI_DISCARD;
2131
2132 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2133 IB_SMI_DISCARD)
2134 return IB_SMI_DISCARD;
2135
2136 } else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
2138 memcpy(response, recv, mad_priv_size(response));
2139 response->header.recv_wc.wc = &response->header.wc;
2140 response->header.recv_wc.recv_buf.opa_mad =
2141 (struct opa_mad *)response->mad;
2142 response->header.recv_wc.recv_buf.grh = &response->grh;
2143
2144 agent_send_response((const struct ib_mad_hdr *)response->mad,
2145 &response->grh, wc,
2146 port_priv->device,
2147 opa_smi_get_fwd_port(smp),
2148 qp_info->qp->qp_num,
2149 recv->header.wc.byte_len,
2150 true);
2151
2152 return IB_SMI_DISCARD;
2153 }
2154
2155 return IB_SMI_HANDLE;
2156}
2157
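/*
 * Dispatch a received SMP to the OPA or IB handler based on its base and
 * class versions.
 */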
2158static enum smi_action
2159handle_smi(struct ib_mad_port_private *port_priv,
2160 struct ib_mad_qp_info *qp_info,
2161 struct ib_wc *wc,
2162 int port_num,
2163 struct ib_mad_private *recv,
2164 struct ib_mad_private *response,
2165 bool opa)
2166{
2167 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2168
2169 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2170 mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2171 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2172 response);
2173
2174 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2175}
2176
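/* Completion handler for receives on the special (SMI/GSI) QPs */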
2177static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2178{
2179 struct ib_mad_port_private *port_priv = cq->cq_context;
2180 struct ib_mad_list_head *mad_list =
2181 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2182 struct ib_mad_qp_info *qp_info;
2183 struct ib_mad_private_header *mad_priv_hdr;
2184 struct ib_mad_private *recv, *response = NULL;
2185 struct ib_mad_agent_private *mad_agent;
2186 int port_num;
2187 int ret = IB_MAD_RESULT_SUCCESS;
2188 size_t mad_size;
2189 u16 resp_mad_pkey_index = 0;
2190 bool opa;
2191
2192 if (list_empty_careful(&port_priv->port_list))
2193 return;
2194
2195 if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
2200 return;
2201 }
2202
2203 qp_info = mad_list->mad_queue->qp_info;
2204 dequeue_mad(mad_list);
2205
2206 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2207 qp_info->port_priv->port_num);
2208
2209 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2210 mad_list);
2211 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2212 ib_dma_unmap_single(port_priv->device,
2213 recv->header.mapping,
2214 mad_priv_dma_size(recv),
2215 DMA_FROM_DEVICE);
2216
	/* Setup MAD receive work completion from "normal" work completion */
2218 recv->header.wc = *wc;
2219 recv->header.recv_wc.wc = &recv->header.wc;
2220
2221 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2222 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2223 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2224 } else {
2225 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2226 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2227 }
2228
2229 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2230 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2231
2232 if (atomic_read(&qp_info->snoop_count))
2233 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2234
	/* Validate MAD */
2236 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2237 goto out;
2238
2239 mad_size = recv->mad_size;
2240 response = alloc_mad_private(mad_size, GFP_KERNEL);
2241 if (!response) {
2242 dev_err(&port_priv->device->dev,
2243 "%s: no memory for response buffer\n", __func__);
2244 goto out;
2245 }
2246
2247 if (rdma_cap_ib_switch(port_priv->device))
2248 port_num = wc->port_num;
2249 else
2250 port_num = port_priv->port_num;
2251
2252 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2253 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2254 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2255 response, opa)
2256 == IB_SMI_DISCARD)
2257 goto out;
2258 }
2259
	/* Give the driver "right of first refusal" on the incoming MAD */
2261 if (port_priv->device->process_mad) {
2262 ret = port_priv->device->process_mad(port_priv->device, 0,
2263 port_priv->port_num,
2264 wc, &recv->grh,
2265 (const struct ib_mad_hdr *)recv->mad,
2266 recv->mad_size,
2267 (struct ib_mad_hdr *)response->mad,
2268 &mad_size, &resp_mad_pkey_index);
2269
2270 if (opa)
2271 wc->pkey_index = resp_mad_pkey_index;
2272
2273 if (ret & IB_MAD_RESULT_SUCCESS) {
2274 if (ret & IB_MAD_RESULT_CONSUMED)
2275 goto out;
2276 if (ret & IB_MAD_RESULT_REPLY) {
2277 agent_send_response((const struct ib_mad_hdr *)response->mad,
2278 &recv->grh, wc,
2279 port_priv->device,
2280 port_num,
2281 qp_info->qp->qp_num,
2282 mad_size, opa);
2283 goto out;
2284 }
2285 }
2286 }
2287
2288 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2289 if (mad_agent) {
2290 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
2295 recv = NULL;
2296 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2297 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2298 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2299 port_priv->device, port_num,
2300 qp_info->qp->qp_num, mad_size, opa);
2301 }
2302
2303out:
	/* Post another receive request for this QP */
2305 if (response) {
2306 ib_mad_post_receive_mads(qp_info, response);
2307 kfree(recv);
2308 } else
2309 ib_mad_post_receive_mads(qp_info, recv);
2310}
2311
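/*
 * Re-arm (or cancel) the agent's timeout work based on the request at the
 * head of its wait list.  Called with mad_agent_priv->lock held.
 */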
2312static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2313{
2314 struct ib_mad_send_wr_private *mad_send_wr;
2315 unsigned long delay;
2316
2317 if (list_empty(&mad_agent_priv->wait_list)) {
2318 cancel_delayed_work(&mad_agent_priv->timed_work);
2319 } else {
2320 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2321 struct ib_mad_send_wr_private,
2322 agent_list);
2323
2324 if (time_after(mad_agent_priv->timeout,
2325 mad_send_wr->timeout)) {
2326 mad_agent_priv->timeout = mad_send_wr->timeout;
2327 delay = mad_send_wr->timeout - jiffies;
2328 if ((long)delay <= 0)
2329 delay = 1;
2330 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2331 &mad_agent_priv->timed_work, delay);
2332 }
2333 }
2334}
2335
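/*
 * Move a sent MAD onto the agent's wait list in timeout order and
 * reschedule the timeout work if it becomes the new head.  Called with
 * mad_agent_priv->lock held.
 */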
2336static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2337{
2338 struct ib_mad_agent_private *mad_agent_priv;
2339 struct ib_mad_send_wr_private *temp_mad_send_wr;
2340 struct list_head *list_item;
2341 unsigned long delay;
2342
2343 mad_agent_priv = mad_send_wr->mad_agent_priv;
2344 list_del(&mad_send_wr->agent_list);
2345
2346 delay = mad_send_wr->timeout;
2347 mad_send_wr->timeout += jiffies;
2348
2349 if (delay) {
2350 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2351 temp_mad_send_wr = list_entry(list_item,
2352 struct ib_mad_send_wr_private,
2353 agent_list);
2354 if (time_after(mad_send_wr->timeout,
2355 temp_mad_send_wr->timeout))
2356 break;
2357 }
	} else
2360 list_item = &mad_agent_priv->wait_list;
2361 list_add(&mad_send_wr->agent_list, list_item);
2362
	/* Reschedule a work item if we have a shorter timeout */
2364 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2365 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2366 &mad_agent_priv->timed_work, delay);
2367}
2368
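/*
 * Reset the timeout of an outstanding send and requeue it on the wait
 * list.  Called with mad_agent_priv->lock held.
 */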
2369void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2370 int timeout_ms)
2371{
2372 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2373 wait_for_response(mad_send_wr);
2374}
2375
/*
 * Process a send work completion
 */
2379void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2380 struct ib_mad_send_wc *mad_send_wc)
2381{
2382 struct ib_mad_agent_private *mad_agent_priv;
2383 unsigned long flags;
2384 int ret;
2385
2386 mad_agent_priv = mad_send_wr->mad_agent_priv;
2387 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2388 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2389 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2390 if (ret == IB_RMPP_RESULT_CONSUMED)
2391 goto done;
2392 } else
2393 ret = IB_RMPP_RESULT_UNHANDLED;
2394
2395 if (mad_send_wc->status != IB_WC_SUCCESS &&
2396 mad_send_wr->status == IB_WC_SUCCESS) {
2397 mad_send_wr->status = mad_send_wc->status;
2398 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2399 }
2400
2401 if (--mad_send_wr->refcount > 0) {
2402 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2403 mad_send_wr->status == IB_WC_SUCCESS) {
2404 wait_for_response(mad_send_wr);
2405 }
2406 goto done;
2407 }
2408
	/* Remove send from MAD agent and notify client of completion */
2410 list_del(&mad_send_wr->agent_list);
2411 adjust_timeout(mad_agent_priv);
2412 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2413
	if (mad_send_wr->status != IB_WC_SUCCESS)
2415 mad_send_wc->status = mad_send_wr->status;
2416 if (ret == IB_RMPP_RESULT_INTERNAL)
2417 ib_rmpp_send_handler(mad_send_wc);
2418 else
2419 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2420 mad_send_wc);
2421
	/* Release reference on agent taken when sending */
2423 deref_mad_agent(mad_agent_priv);
2424 return;
2425done:
2426 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2427}
2428
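/* Completion handler for sends on the special (SMI/GSI) QPs */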
2429static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2430{
2431 struct ib_mad_port_private *port_priv = cq->cq_context;
2432 struct ib_mad_list_head *mad_list =
2433 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2434 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2435 struct ib_mad_qp_info *qp_info;
2436 struct ib_mad_queue *send_queue;
2437 struct ib_send_wr *bad_send_wr;
2438 struct ib_mad_send_wc mad_send_wc;
2439 unsigned long flags;
2440 int ret;
2441
2442 if (list_empty_careful(&port_priv->port_list))
2443 return;
2444
2445 if (wc->status != IB_WC_SUCCESS) {
2446 if (!ib_mad_send_error(port_priv, wc))
2447 return;
2448 }
2449
2450 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2451 mad_list);
2452 send_queue = mad_list->mad_queue;
2453 qp_info = send_queue->qp_info;
2454
2455retry:
2456 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2457 mad_send_wr->header_mapping,
2458 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2459 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2460 mad_send_wr->payload_mapping,
2461 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2462 queued_send_wr = NULL;
2463 spin_lock_irqsave(&send_queue->lock, flags);
2464 list_del(&mad_list->list);
2465
	/* Move queued send to the send queue */
2467 if (send_queue->count-- > send_queue->max_active) {
2468 mad_list = container_of(qp_info->overflow_list.next,
2469 struct ib_mad_list_head, list);
2470 queued_send_wr = container_of(mad_list,
2471 struct ib_mad_send_wr_private,
2472 mad_list);
2473 list_move_tail(&mad_list->list, &send_queue->list);
2474 }
2475 spin_unlock_irqrestore(&send_queue->lock, flags);
2476
2477 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2478 mad_send_wc.status = wc->status;
2479 mad_send_wc.vendor_err = wc->vendor_err;
2480 if (atomic_read(&qp_info->snoop_count))
2481 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2482 IB_MAD_SNOOP_SEND_COMPLETIONS);
2483 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2484
2485 if (queued_send_wr) {
2486 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2487 &bad_send_wr);
2488 if (ret) {
2489 dev_err(&port_priv->device->dev,
2490 "ib_post_send failed: %d\n", ret);
2491 mad_send_wr = queued_send_wr;
2492 wc->status = IB_WC_LOC_QP_OP_ERR;
2493 goto retry;
2494 }
2495 }
2496}
2497
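/*
 * Mark every send currently on the hardware send queue so that a later
 * flush completion reposts it instead of reporting an error.
 */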
2498static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2499{
2500 struct ib_mad_send_wr_private *mad_send_wr;
2501 struct ib_mad_list_head *mad_list;
2502 unsigned long flags;
2503
2504 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2505 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2506 mad_send_wr = container_of(mad_list,
2507 struct ib_mad_send_wr_private,
2508 mad_list);
2509 mad_send_wr->retry = 1;
2510 }
2511 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2512}
2513
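/*
 * Handle a failed send completion.  Flushed sends marked for retry are
 * reposted; other errors move the QP from SQE back to RTS and mark the
 * queued sends for retry.  Returns true if the completion should still be
 * reported to the client.
 */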
2514static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2515 struct ib_wc *wc)
2516{
2517 struct ib_mad_list_head *mad_list =
2518 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2519 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2520 struct ib_mad_send_wr_private *mad_send_wr;
2521 int ret;
2522
	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
2527 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2528 mad_list);
2529 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2530 if (mad_send_wr->retry) {
			/* Repost send */
2532 struct ib_send_wr *bad_send_wr;
2533
2534 mad_send_wr->retry = 0;
2535 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2536 &bad_send_wr);
2537 if (!ret)
2538 return false;
2539 }
2540 } else {
2541 struct ib_qp_attr *attr;
2542
		/* Transition QP to RTS and fail offending send */
2544 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2545 if (attr) {
2546 attr->qp_state = IB_QPS_RTS;
2547 attr->cur_qp_state = IB_QPS_SQE;
2548 ret = ib_modify_qp(qp_info->qp, attr,
2549 IB_QP_STATE | IB_QP_CUR_STATE);
2550 kfree(attr);
2551 if (ret)
2552 dev_err(&port_priv->device->dev,
2553 "%s - ib_modify_qp to RTS: %d\n",
2554 __func__, ret);
2555 else
2556 mark_sends_for_retry(qp_info);
2557 }
2558 }
2559
2560 return true;
2561}
2562
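/*
 * Called when an agent unregisters: mark its active sends as flushed and
 * complete everything on its wait list with IB_WC_WR_FLUSH_ERR.
 */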
2563static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2564{
2565 unsigned long flags;
2566 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2567 struct ib_mad_send_wc mad_send_wc;
2568 struct list_head cancel_list;
2569
2570 INIT_LIST_HEAD(&cancel_list);
2571
2572 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2573 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2574 &mad_agent_priv->send_list, agent_list) {
2575 if (mad_send_wr->status == IB_WC_SUCCESS) {
2576 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2577 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2578 }
2579 }
2580
	/* Empty wait list to prevent receives from finding a request */
2582 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2583 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2584
	/* Report all cancelled requests */
2586 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2587 mad_send_wc.vendor_err = 0;
2588
2589 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2590 &cancel_list, agent_list) {
2591 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2592 list_del(&mad_send_wr->agent_list);
2593 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2594 &mad_send_wc);
2595 atomic_dec(&mad_agent_priv->refcount);
2596 }
2597}
2598
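/*
 * Find the send WR that owns a send buffer, searching the wait list first
 * and then the active send list (RMPP data MADs only).
 */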
2599static struct ib_mad_send_wr_private*
2600find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2601 struct ib_mad_send_buf *send_buf)
2602{
2603 struct ib_mad_send_wr_private *mad_send_wr;
2604
2605 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2606 agent_list) {
2607 if (&mad_send_wr->send_buf == send_buf)
2608 return mad_send_wr;
2609 }
2610
2611 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2612 agent_list) {
2613 if (is_rmpp_data_mad(mad_agent_priv,
2614 mad_send_wr->send_buf.mad) &&
2615 &mad_send_wr->send_buf == send_buf)
2616 return mad_send_wr;
2617 }
2618 return NULL;
2619}
2620
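/*
 * Adjust the response timeout of an outstanding send; a timeout of zero
 * cancels the MAD.
 */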
2621int ib_modify_mad(struct ib_mad_agent *mad_agent,
2622 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2623{
2624 struct ib_mad_agent_private *mad_agent_priv;
2625 struct ib_mad_send_wr_private *mad_send_wr;
2626 unsigned long flags;
2627 int active;
2628
2629 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2630 agent);
2631 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2632 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2633 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2634 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2635 return -EINVAL;
2636 }
2637
2638 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2639 if (!timeout_ms) {
2640 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2641 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2642 }
2643
2644 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2645 if (active)
2646 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2647 else
2648 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2649
2650 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2651 return 0;
2652}
2653EXPORT_SYMBOL(ib_modify_mad);
2654
2655void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2656 struct ib_mad_send_buf *send_buf)
2657{
2658 ib_modify_mad(mad_agent, send_buf, 0);
2659}
2660EXPORT_SYMBOL(ib_cancel_mad);
2661
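/*
 * Work handler that completes MADs which were processed locally (sent to
 * an agent on the same port) without going through the hardware QP.
 */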
2662static void local_completions(struct work_struct *work)
2663{
2664 struct ib_mad_agent_private *mad_agent_priv;
2665 struct ib_mad_local_private *local;
2666 struct ib_mad_agent_private *recv_mad_agent;
2667 unsigned long flags;
2668 int free_mad;
2669 struct ib_wc wc;
2670 struct ib_mad_send_wc mad_send_wc;
2671 bool opa;
2672
2673 mad_agent_priv =
2674 container_of(work, struct ib_mad_agent_private, local_work);
2675
2676 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2677 mad_agent_priv->qp_info->port_priv->port_num);
2678
2679 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2680 while (!list_empty(&mad_agent_priv->local_list)) {
2681 local = list_entry(mad_agent_priv->local_list.next,
2682 struct ib_mad_local_private,
2683 completion_list);
2684 list_del(&local->completion_list);
2685 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2686 free_mad = 0;
2687 if (local->mad_priv) {
2688 u8 base_version;
2689 recv_mad_agent = local->recv_mad_agent;
2690 if (!recv_mad_agent) {
2691 dev_err(&mad_agent_priv->agent.device->dev,
2692 "No receive MAD agent for local completion\n");
2693 free_mad = 1;
2694 goto local_send_completion;
2695 }
2696
			/*
			 * Defined behavior is to complete response
			 * before request
			 */
2701 build_smp_wc(recv_mad_agent->agent.qp,
2702 local->mad_send_wr->send_wr.wr.wr_cqe,
2703 be16_to_cpu(IB_LID_PERMISSIVE),
2704 local->mad_send_wr->send_wr.pkey_index,
2705 recv_mad_agent->agent.port_num, &wc);
2706
2707 local->mad_priv->header.recv_wc.wc = &wc;
2708
2709 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2710 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2711 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2712 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2713 } else {
2714 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2715 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2716 }
2717
2718 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2719 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2720 &local->mad_priv->header.recv_wc.rmpp_list);
2721 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2722 local->mad_priv->header.recv_wc.recv_buf.mad =
2723 (struct ib_mad *)local->mad_priv->mad;
2724 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2725 snoop_recv(recv_mad_agent->qp_info,
2726 &local->mad_priv->header.recv_wc,
2727 IB_MAD_SNOOP_RECVS);
2728 recv_mad_agent->agent.recv_handler(
2729 &recv_mad_agent->agent,
2730 &local->mad_send_wr->send_buf,
2731 &local->mad_priv->header.recv_wc);
2732 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2733 atomic_dec(&recv_mad_agent->refcount);
2734 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2735 }
2736
2737local_send_completion:
		/* Complete send */
2739 mad_send_wc.status = IB_WC_SUCCESS;
2740 mad_send_wc.vendor_err = 0;
2741 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2742 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2743 snoop_send(mad_agent_priv->qp_info,
2744 &local->mad_send_wr->send_buf,
2745 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2746 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2747 &mad_send_wc);
2748
2749 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2750 atomic_dec(&mad_agent_priv->refcount);
2751 if (free_mad)
2752 kfree(local->mad_priv);
2753 kfree(local);
2754 }
2755 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2756}
2757
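/*
 * Resend a timed-out MAD if it has retries left, putting it back on the
 * agent's active send list.  Returns nonzero if the send was not retried.
 */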
2758static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2759{
2760 int ret;
2761
2762 if (!mad_send_wr->retries_left)
2763 return -ETIMEDOUT;
2764
2765 mad_send_wr->retries_left--;
2766 mad_send_wr->send_buf.retries++;
2767
2768 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2769
2770 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2771 ret = ib_retry_rmpp(mad_send_wr);
2772 switch (ret) {
2773 case IB_RMPP_RESULT_UNHANDLED:
2774 ret = ib_send_mad(mad_send_wr);
2775 break;
2776 case IB_RMPP_RESULT_CONSUMED:
2777 ret = 0;
2778 break;
2779 default:
2780 ret = -ECOMM;
2781 break;
2782 }
2783 } else
2784 ret = ib_send_mad(mad_send_wr);
2785
2786 if (!ret) {
2787 mad_send_wr->refcount++;
2788 list_add_tail(&mad_send_wr->agent_list,
2789 &mad_send_wr->mad_agent_priv->send_list);
2790 }
2791 return ret;
2792}
2793
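/*
 * Work handler that expires requests on the agent's wait list, retrying
 * them when possible and otherwise reporting IB_WC_RESP_TIMEOUT_ERR (or
 * the recorded error status) to the client.
 */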
2794static void timeout_sends(struct work_struct *work)
2795{
2796 struct ib_mad_agent_private *mad_agent_priv;
2797 struct ib_mad_send_wr_private *mad_send_wr;
2798 struct ib_mad_send_wc mad_send_wc;
2799 unsigned long flags, delay;
2800
2801 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2802 timed_work.work);
2803 mad_send_wc.vendor_err = 0;
2804
2805 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2806 while (!list_empty(&mad_agent_priv->wait_list)) {
2807 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2808 struct ib_mad_send_wr_private,
2809 agent_list);
2810
2811 if (time_after(mad_send_wr->timeout, jiffies)) {
2812 delay = mad_send_wr->timeout - jiffies;
2813 if ((long)delay <= 0)
2814 delay = 1;
2815 queue_delayed_work(mad_agent_priv->qp_info->
2816 port_priv->wq,
2817 &mad_agent_priv->timed_work, delay);
2818 break;
2819 }
2820
2821 list_del(&mad_send_wr->agent_list);
2822 if (mad_send_wr->status == IB_WC_SUCCESS &&
2823 !retry_send(mad_send_wr))
2824 continue;
2825
2826 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2827
2828 if (mad_send_wr->status == IB_WC_SUCCESS)
2829 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2830 else
2831 mad_send_wc.status = mad_send_wr->status;
2832 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2833 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2834 &mad_send_wc);
2835
2836 atomic_dec(&mad_agent_priv->refcount);
2837 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2838 }
2839 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2840}
2841
/*
 * Allocate receive MADs and post receive WRs for them
 */
2845static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2846 struct ib_mad_private *mad)
2847{
2848 unsigned long flags;
2849 int post, ret;
2850 struct ib_mad_private *mad_priv;
2851 struct ib_sge sg_list;
2852 struct ib_recv_wr recv_wr, *bad_recv_wr;
2853 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2854
	/* Initialize common scatter list fields */
2856 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2857
	/* Initialize common receive WR fields */
2859 recv_wr.next = NULL;
2860 recv_wr.sg_list = &sg_list;
2861 recv_wr.num_sge = 1;
2862
2863 do {
		/* Allocate and map receive buffer */
2865 if (mad) {
2866 mad_priv = mad;
2867 mad = NULL;
2868 } else {
2869 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2870 GFP_ATOMIC);
2871 if (!mad_priv) {
2872 dev_err(&qp_info->port_priv->device->dev,
2873 "No memory for receive buffer\n");
2874 ret = -ENOMEM;
2875 break;
2876 }
2877 }
2878 sg_list.length = mad_priv_dma_size(mad_priv);
2879 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2880 &mad_priv->grh,
2881 mad_priv_dma_size(mad_priv),
2882 DMA_FROM_DEVICE);
2883 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2884 sg_list.addr))) {
2885 ret = -ENOMEM;
2886 break;
2887 }
2888 mad_priv->header.mapping = sg_list.addr;
2889 mad_priv->header.mad_list.mad_queue = recv_queue;
2890 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2891 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2892
		/* Post receive WR */
2894 spin_lock_irqsave(&recv_queue->lock, flags);
2895 post = (++recv_queue->count < recv_queue->max_active);
2896 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2897 spin_unlock_irqrestore(&recv_queue->lock, flags);
2898 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2899 if (ret) {
2900 spin_lock_irqsave(&recv_queue->lock, flags);
2901 list_del(&mad_priv->header.mad_list.list);
2902 recv_queue->count--;
2903 spin_unlock_irqrestore(&recv_queue->lock, flags);
2904 ib_dma_unmap_single(qp_info->port_priv->device,
2905 mad_priv->header.mapping,
2906 mad_priv_dma_size(mad_priv),
2907 DMA_FROM_DEVICE);
2908 kfree(mad_priv);
2909 dev_err(&qp_info->port_priv->device->dev,
2910 "ib_post_recv failed: %d\n", ret);
2911 break;
2912 }
2913 } while (post);
2914
2915 return ret;
2916}
2917
/*
 * Return all the posted receive MADs
 */
2921static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2922{
2923 struct ib_mad_private_header *mad_priv_hdr;
2924 struct ib_mad_private *recv;
2925 struct ib_mad_list_head *mad_list;
2926
2927 if (!qp_info->qp)
2928 return;
2929
2930 while (!list_empty(&qp_info->recv_queue.list)) {
2931
2932 mad_list = list_entry(qp_info->recv_queue.list.next,
2933 struct ib_mad_list_head, list);
2934 mad_priv_hdr = container_of(mad_list,
2935 struct ib_mad_private_header,
2936 mad_list);
2937 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2938 header);
2939
		/* Remove from posted receive MAD list */
2941 list_del(&mad_list->list);
2942
2943 ib_dma_unmap_single(qp_info->port_priv->device,
2944 recv->header.mapping,
2945 mad_priv_dma_size(recv),
2946 DMA_FROM_DEVICE);
2947 kfree(recv);
2948 }
2949
2950 qp_info->recv_queue.count = 0;
2951}
2952
/*
 * Start the port
 */
2956static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2957{
2958 int ret, i;
2959 struct ib_qp_attr *attr;
2960 struct ib_qp *qp;
2961 u16 pkey_index;
2962
2963 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2964 if (!attr) {
2965 dev_err(&port_priv->device->dev,
2966 "Couldn't kmalloc ib_qp_attr\n");
2967 return -ENOMEM;
2968 }
2969
2970 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2971 IB_DEFAULT_PKEY_FULL, &pkey_index);
2972 if (ret)
2973 pkey_index = 0;
2974
2975 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2976 qp = port_priv->qp_info[i].qp;
2977 if (!qp)
2978 continue;
2979
		/*
		 * PKey index for QP0 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
2984 attr->qp_state = IB_QPS_INIT;
2985 attr->pkey_index = pkey_index;
2986 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2987 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2988 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2989 if (ret) {
2990 dev_err(&port_priv->device->dev,
2991 "Couldn't change QP%d state to INIT: %d\n",
2992 i, ret);
2993 goto out;
2994 }
2995
2996 attr->qp_state = IB_QPS_RTR;
2997 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2998 if (ret) {
2999 dev_err(&port_priv->device->dev,
3000 "Couldn't change QP%d state to RTR: %d\n",
3001 i, ret);
3002 goto out;
3003 }
3004
3005 attr->qp_state = IB_QPS_RTS;
3006 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3007 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3008 if (ret) {
3009 dev_err(&port_priv->device->dev,
3010 "Couldn't change QP%d state to RTS: %d\n",
3011 i, ret);
3012 goto out;
3013 }
3014 }
3015
3016 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3017 if (ret) {
3018 dev_err(&port_priv->device->dev,
3019 "Failed to request completion notification: %d\n",
3020 ret);
3021 goto out;
3022 }
3023
3024 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3025 if (!port_priv->qp_info[i].qp)
3026 continue;
3027
3028 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3029 if (ret) {
3030 dev_err(&port_priv->device->dev,
3031 "Couldn't post receive WRs\n");
3032 goto out;
3033 }
3034 }
3035out:
3036 kfree(attr);
3037 return ret;
3038}
3039
3040static void qp_event_handler(struct ib_event *event, void *qp_context)
3041{
3042 struct ib_mad_qp_info *qp_info = qp_context;
3043
	/* The MAD layer cannot recover the QP here; just report the fatal event */
3045 dev_err(&qp_info->port_priv->device->dev,
3046 "Fatal error (%d) on MAD QP (%d)\n",
3047 event->event, qp_info->qp->qp_num);
3048}
3049
3050static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3051 struct ib_mad_queue *mad_queue)
3052{
3053 mad_queue->qp_info = qp_info;
3054 mad_queue->count = 0;
3055 spin_lock_init(&mad_queue->lock);
3056 INIT_LIST_HEAD(&mad_queue->list);
3057}
3058
3059static void init_mad_qp(struct ib_mad_port_private *port_priv,
3060 struct ib_mad_qp_info *qp_info)
3061{
3062 qp_info->port_priv = port_priv;
3063 init_mad_queue(qp_info, &qp_info->send_queue);
3064 init_mad_queue(qp_info, &qp_info->recv_queue);
3065 INIT_LIST_HEAD(&qp_info->overflow_list);
3066 spin_lock_init(&qp_info->snoop_lock);
3067 qp_info->snoop_table = NULL;
3068 qp_info->snoop_table_size = 0;
3069 atomic_set(&qp_info->snoop_count, 0);
3070}
3071
3072static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3073 enum ib_qp_type qp_type)
3074{
3075 struct ib_qp_init_attr qp_init_attr;
3076 int ret;
3077
3078 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3079 qp_init_attr.send_cq = qp_info->port_priv->cq;
3080 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3081 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3082 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3083 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3084 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3085 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3086 qp_init_attr.qp_type = qp_type;
3087 qp_init_attr.port_num = qp_info->port_priv->port_num;
3088 qp_init_attr.qp_context = qp_info;
3089 qp_init_attr.event_handler = qp_event_handler;
3090 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3091 if (IS_ERR(qp_info->qp)) {
3092 dev_err(&qp_info->port_priv->device->dev,
3093 "Couldn't create ib_mad QP%d\n",
3094 get_spl_qp_index(qp_type));
3095 ret = PTR_ERR(qp_info->qp);
3096 goto error;
3097 }
3098
3099 qp_info->send_queue.max_active = mad_sendq_size;
3100 qp_info->recv_queue.max_active = mad_recvq_size;
3101 return 0;
3102
3103error:
3104 return ret;
3105}
3106
3107static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3108{
3109 if (!qp_info->qp)
3110 return;
3111
3112 ib_destroy_qp(qp_info->qp);
3113 kfree(qp_info->snoop_table);
3114}
3115
/*
 * Open the port
 * Create the QPs, PD, and CQ if needed
 */
3120static int ib_mad_port_open(struct ib_device *device,
3121 int port_num)
3122{
3123 int ret, cq_size;
3124 struct ib_mad_port_private *port_priv;
3125 unsigned long flags;
3126 char name[sizeof "ib_mad123"];
3127 int has_smi;
3128
3129 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3130 return -EFAULT;
3131
3132 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3133 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3134 return -EFAULT;
3135
	/* Create new device info */
3137 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3138 if (!port_priv) {
3139 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
3140 return -ENOMEM;
3141 }
3142
3143 port_priv->device = device;
3144 port_priv->port_num = port_num;
3145 spin_lock_init(&port_priv->reg_lock);
3146 INIT_LIST_HEAD(&port_priv->agent_list);
3147 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3148 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3149
3150 cq_size = mad_sendq_size + mad_recvq_size;
3151 has_smi = rdma_cap_ib_smi(device, port_num);
3152 if (has_smi)
3153 cq_size *= 2;
3154
3155 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3156 IB_POLL_WORKQUEUE);
3157 if (IS_ERR(port_priv->cq)) {
3158 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3159 ret = PTR_ERR(port_priv->cq);
3160 goto error3;
3161 }
3162
3163 port_priv->pd = ib_alloc_pd(device, 0);
3164 if (IS_ERR(port_priv->pd)) {
3165 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3166 ret = PTR_ERR(port_priv->pd);
3167 goto error4;
3168 }
3169
3170 if (has_smi) {
3171 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3172 if (ret)
3173 goto error6;
3174 }
3175 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3176 if (ret)
3177 goto error7;
3178
3179 snprintf(name, sizeof name, "ib_mad%d", port_num);
3180 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3181 if (!port_priv->wq) {
3182 ret = -ENOMEM;
3183 goto error8;
3184 }
3185
3186 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3187 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3188 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3189
3190 ret = ib_mad_port_start(port_priv);
3191 if (ret) {
3192 dev_err(&device->dev, "Couldn't start port\n");
3193 goto error9;
3194 }
3195
3196 return 0;
3197
3198error9:
3199 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3200 list_del_init(&port_priv->port_list);
3201 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3202
3203 destroy_workqueue(port_priv->wq);
3204error8:
3205 destroy_mad_qp(&port_priv->qp_info[1]);
3206error7:
3207 destroy_mad_qp(&port_priv->qp_info[0]);
3208error6:
3209 ib_dealloc_pd(port_priv->pd);
3210error4:
3211 ib_free_cq(port_priv->cq);
3212 cleanup_recv_queue(&port_priv->qp_info[1]);
3213 cleanup_recv_queue(&port_priv->qp_info[0]);
3214error3:
3215 kfree(port_priv);
3216
3217 return ret;
3218}
3219
/*
 * Close the port
 * Free the port resources (CQ, PD, QPs, workqueue) and
 * deallocate the port's private info structure
 */
3225static int ib_mad_port_close(struct ib_device *device, int port_num)
3226{
3227 struct ib_mad_port_private *port_priv;
3228 unsigned long flags;
3229
3230 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3231 port_priv = __ib_get_mad_port(device, port_num);
3232 if (port_priv == NULL) {
3233 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3234 dev_err(&device->dev, "Port %d not found\n", port_num);
3235 return -ENODEV;
3236 }
3237 list_del_init(&port_priv->port_list);
3238 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3239
3240 destroy_workqueue(port_priv->wq);
3241 destroy_mad_qp(&port_priv->qp_info[1]);
3242 destroy_mad_qp(&port_priv->qp_info[0]);
3243 ib_dealloc_pd(port_priv->pd);
3244 ib_free_cq(port_priv->cq);
3245 cleanup_recv_queue(&port_priv->qp_info[1]);
3246 cleanup_recv_queue(&port_priv->qp_info[0]);
3247
	/* XXX: Handle deallocation of MAD registration tables */
3249 kfree(port_priv);
3250
3251 return 0;
3252}
3253
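/*
 * Called for each new IB device: open MAD and agent services on every
 * port that supports IB management, rolling everything back on failure.
 */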
3254static void ib_mad_init_device(struct ib_device *device)
3255{
3256 int start, i;
3257
3258 start = rdma_start_port(device);
3259
3260 for (i = start; i <= rdma_end_port(device); i++) {
3261 if (!rdma_cap_ib_mad(device, i))
3262 continue;
3263
3264 if (ib_mad_port_open(device, i)) {
3265 dev_err(&device->dev, "Couldn't open port %d\n", i);
3266 goto error;
3267 }
3268 if (ib_agent_port_open(device, i)) {
3269 dev_err(&device->dev,
3270 "Couldn't open port %d for agents\n", i);
3271 goto error_agent;
3272 }
3273 }
3274 return;
3275
3276error_agent:
3277 if (ib_mad_port_close(device, i))
3278 dev_err(&device->dev, "Couldn't close port %d\n", i);
3279
3280error:
3281 while (--i >= start) {
3282 if (!rdma_cap_ib_mad(device, i))
3283 continue;
3284
3285 if (ib_agent_port_close(device, i))
3286 dev_err(&device->dev,
3287 "Couldn't close port %d for agents\n", i);
3288 if (ib_mad_port_close(device, i))
3289 dev_err(&device->dev, "Couldn't close port %d\n", i);
3290 }
3291}
3292
3293static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3294{
3295 int i;
3296
3297 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3298 if (!rdma_cap_ib_mad(device, i))
3299 continue;
3300
3301 if (ib_agent_port_close(device, i))
3302 dev_err(&device->dev,
3303 "Couldn't close port %d for agents\n", i);
3304 if (ib_mad_port_close(device, i))
3305 dev_err(&device->dev, "Couldn't close port %d\n", i);
3306 }
3307}
3308
3309static struct ib_client mad_client = {
3310 .name = "mad",
3311 .add = ib_mad_init_device,
3312 .remove = ib_mad_remove_device
3313};
3314
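/* Clamp the queue-size module parameters and register the MAD client */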
3315int ib_mad_init(void)
3316{
3317 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3318 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3319
3320 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3321 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3322
3323 INIT_LIST_HEAD(&ib_mad_port_list);
3324
3325 if (ib_register_client(&mad_client)) {
3326 pr_err("Couldn't register ib_mad client\n");
3327 return -EINVAL;
3328 }
3329
3330 return 0;
3331}
3332
3333void ib_mad_cleanup(void)
3334{
3335 ib_unregister_client(&mad_client);
3336}
3337