/*
 * ib_umad: InfiniBand userspace MAD packet access.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR	   = 231,
	IB_UMAD_MINOR_BASE = 0
};
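
/*
 * Each IB port gets two character devices: umad%d (minor
 * IB_UMAD_MINOR_BASE + dev_num) for sending and receiving MADs, and
 * issm%d (same dev_num, offset by IB_UMAD_MAX_PORTS minors) for
 * claiming and releasing the port's SM capability.
 */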
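
/*
 * Lifetime rules (as implemented below): each open() of a umad or issm
 * device looks up the struct ib_umad_port by minor in umad_port[] under
 * port_lock and takes a kref on the owning struct ib_umad_device; the
 * matching close() drops it.  ib_umad_kill_port() clears the table
 * entry, and the last kref_put() frees the whole ib_umad_device.
 */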
struct ib_umad_port {
	struct cdev	      *cdev;
	struct device	      *dev;

	struct cdev	      *sm_cdev;
	struct device	      *sm_dev;
	struct semaphore       sm_sem;

	struct mutex	       file_mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int		       dev_num;
	u8		       port_num;
};

struct ib_umad_device {
	int		     start_port, end_port;
	struct kref	     ref;
	struct ib_umad_port  port[0];
};

struct ib_umad_file {
	struct mutex		mutex;
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	send_list;
	struct list_head	port_list;
	spinlock_t		send_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
	u8			use_pkey_index;
	u8			already_used;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head	list;
	int			length;
	struct ib_user_mad	mad;
};

static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);

static void ib_umad_release_dev(struct kref *ref)
{
	struct ib_umad_device *dev =
		container_of(ref, struct ib_umad_device, ref);

	kfree(dev);
}

/*
 * The exported header is larger once the file has opted in to the
 * P_Key index ABI (IB_USER_MAD_ENABLE_PKEY).
 */
static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}

static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}

static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

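/*
 * On send completion, release the AH and send buffer.  If a request
 * timed out waiting for a response, hand the packet back to the file's
 * receive queue with status ETIMEDOUT so userspace can see the failure.
 */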
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}

static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	    = 0;
	packet->mad.hdr.length	    = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	    = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	    = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	    = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index     = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit     = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label    = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}

static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;

	/* We need enough room to copy the first (or only) MAD segment. */
	recv_buf = &packet->recv_wc->recv_buf;
	if ((packet->length <= sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > sizeof (*recv_buf->mad) &&
	     count < hdr_size(file) + sizeof (*recv_buf->mad)))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP
			 * segment, which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = sizeof (struct ib_mad) - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}

static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}

static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad((struct ib_mad *) hdr)) {
			if (!ib_response_mad((struct ib_mad *) sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}

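/*
 * A write() is one struct ib_user_mad header (old or new format,
 * depending on hdr_size()) followed by the MAD itself; the addressing
 * fields in the header are used to build the address handle for the send.
 */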
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid	      = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl	      = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index	  = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label	  = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	} else {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	}

	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * If userspace is generating a request that will generate a
	 * response, we need to make sure the high-order part of the
	 * transaction ID matches the agent being used to send the
	 * MAD.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	spin_lock_irq(&file->send_lock);
	ret = is_duplicate(file, packet);
	if (!ret)
		list_add_tail(&packet->list, &file->send_list);
	spin_unlock_irq(&file->send_lock);
	if (ret) {
		ret = -EINVAL;
		goto err_msg;
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		req.mgmt_class	       = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			/*
			 * A 32-bit task passes the method mask as an array of
			 * 32-bit words; repack each pair into one 64-bit long.
			 */
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			printk(KERN_WARNING "user_mad: process %s did not enable "
			       "P_Key index support.\n", current->comm);
			printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt "
			       "has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
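
/*
 * open() looks the port up by minor in umad_port[] under port_lock and
 * takes a reference on the owning umad device; the reference is dropped
 * again on close(), or immediately on any failure here.
 */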
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

out:
	mutex_unlock(&port->file_mutex);
	/* drop the device reference taken above on any failure */
	if (ret)
		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kref_put(&dev->ref, ib_umad_release_dev);

	return 0;
}

static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close
};

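/*
 * Opening an issm device claims the port's SM capability: sm_sem makes
 * the claim exclusive, and IB_PORT_SM is set in the port capability
 * mask until the file is closed again.
 */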
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	spin_lock(&port_lock);
	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
	if (port)
		kref_get(&port->umad_dev->ref);
	spin_unlock(&port_lock);

	if (!port)
		return -ENXIO;

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret) {
		up(&port->sm_sem);
		goto fail;
	}

	filp->private_data = port;

	return 0;

fail:
	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kref_put(&port->umad_dev->ref, ib_umad_release_dev);

	return ret;
}

static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close
};

static struct ib_client umad_client = {
	.name	= "umad",
	.add	= ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static ssize_t show_abi_version(struct class *class, char *buf)
{
	return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

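/*
 * Per-port setup: reserve a dev_num from the dev_map bitmap, then
 * create the umad%d and issm%d cdevs, their device nodes, and the
 * ibdev/port sysfs attributes.  Any failure unwinds everything created
 * so far.
 */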
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_port *port)
{
	spin_lock(&port_lock);
	port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (port->dev_num >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		return -1;
	}
	set_bit(port->dev_num, dev_map);
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	init_MUTEX(&port->sm_sem);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	port->cdev = cdev_alloc();
	if (!port->cdev)
		return -1;
	port->cdev->owner = THIS_MODULE;
	port->cdev->ops	  = &umad_fops;
	kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
	if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
		goto err_cdev;

	port->dev = device_create(umad_class, device->dma_device,
				  port->cdev->dev, port,
				  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	port->sm_cdev = cdev_alloc();
	if (!port->sm_cdev)
		goto err_dev;
	port->sm_cdev->owner = THIS_MODULE;
	port->sm_cdev->ops   = &umad_sm_fops;
	kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
	if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create(umad_class, device->dma_device,
				     port->sm_cdev->dev, port,
				     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	spin_lock(&port_lock);
	umad_port[port->dev_num] = port;
	spin_unlock(&port_lock);

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev->dev);

err_sm_cdev:
	cdev_del(port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev->dev);

err_cdev:
	cdev_del(port->cdev);
	clear_bit(port->dev_num, dev_map);

	return -1;
}

static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev->dev);
	device_destroy(umad_class, port->sm_cdev->dev);

	cdev_del(port->cdev);
	cdev_del(port->sm_cdev);

	spin_lock(&port_lock);
	umad_port[port->dev_num] = NULL;
	spin_unlock(&port_lock);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	clear_bit(port->dev_num, dev_map);
}

static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kref_init(&umad_dev->ref);

	umad_dev->start_port = s;
	umad_dev->end_port   = e;

	for (i = s; i <= e; ++i) {
		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
			goto err;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s)
		ib_umad_kill_port(&umad_dev->port[i - s]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static void ib_umad_remove_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i)
		ib_umad_kill_port(&umad_dev->port[i]);

	kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = class_create_file(umad_class, &class_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);