#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>

#include <rdma/ib.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

static const struct file_operations qib_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_write,
	.write_iter = qib_write_iter,
	.open = qib_open,
	.release = qib_close,
	.poll = qib_poll,
	.mmap = qib_mmapf,
	.llseek = noop_llseek,
};
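
/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * Returns 0 for an address with no backing page, which callers treat
 * as "not mappable".
 */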
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
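
/*
 * qib_get_base_info - return the info a new user process needs: sizes
 * and counts of the receive structures, and the mmap tokens for the
 * rcvhdrq, eager buffers, PIO buffers and user registers.  The filled
 * structure is copied out to ubase; when the context is not shared,
 * the 7 subcontext fields at the end are omitted.
 */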
static int qib_get_base_info(struct file *fp, void __user *ubase,
			     size_t ubase_size)
{
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	int ret = 0;
	struct qib_base_info *kinfo = NULL;
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	unsigned subctxt_cnt;
	int shared, master;
	size_t sz;

	subctxt_cnt = rcd->subctxt_cnt;
	if (!subctxt_cnt) {
		shared = 0;
		master = 0;
		subctxt_cnt = 1;
	} else {
		shared = 1;
		master = !subctxt_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If context sharing is not requested, allow the smaller structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->f_get_base_info(rcd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
	kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
	/*
	 * Have to tell the user the chunk size and count so the eager
	 * buffers can be mmapped and indexed correctly.
	 */
	kinfo->spi_rcv_egrbuftotlen =
		rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		rcd->rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
	/*
	 * Report the number of configured contexts on the unit, not
	 * just the user contexts.
	 */
	kinfo->spi_nctxts = dd->cfgctxts;
	/* unit (chip/board) our context is on */
	kinfo->spi_unit = dd->unit;
	kinfo->spi_port = ppd->port;
	/* for now, only a single page per TID */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * The addresses below are the mmap tokens (physical addresses
	 * of the shared structures) the user process passes back to
	 * mmap() to map each region; see qib_mmapf().
	 */
	kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
	kinfo->spi_rhf_offset = dd->rhf_offset;
	kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
	/* setup per-unit (not port) status area for user programs */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(char *) ppd->statusp -
		(char *) dd->pioavailregs_dma;
	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!shared) {
		kinfo->spi_piocnt = rcd->piocnt;
		kinfo->spi_piobufbase = (u64) rcd->piobufs;
		kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
	} else if (master) {
		kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
				    (rcd->piocnt % subctxt_cnt);
		/* Master's PIO buffers are after all the slaves' */
		kinfo->spi_piobufbase = (u64) rcd->piobufs +
			dd->palign *
			(rcd->piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subctxt_fp(fp) - 1;

		kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
		kinfo->spi_piobufbase = (u64) rcd->piobufs +
			dd->palign * kinfo->spi_piocnt * slave;
	}

	if (shared) {
		kinfo->spi_sendbuf_status =
			cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);

		kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);

		kinfo->spi_subctxt_rcvegrbuf =
			cvt_kvaddr(rcd->subctxt_rcvegrbuf);
		kinfo->spi_subctxt_rcvhdr_base =
			cvt_kvaddr(rcd->subctxt_rcvhdr_base);
	}

	/*
	 * The PIO index is relative to the start of the 2KB buffer
	 * region; piobufbase can't be used directly because it packs
	 * both the 2K and 4K buffer base values.
	 */
	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
		dd->palign;
	kinfo->spi_pioalign = dd->palign;
	kinfo->spi_qpair = QIB_KD_QP;

	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = ppd->ibmaxlen;
	kinfo->spi_ctxt = rcd->ctxt;
	kinfo->spi_subctxt = subctxt_fp(fp);
	kinfo->spi_sw_version = QIB_KERN_SWVERSION;
	kinfo->spi_sw_version |= 1U << 31;
	kinfo->spi_hw_version = dd->revision;

	if (master)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;
bail:
	kfree(kinfo);
	return ret;
}
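
/**
 * qib_tid_update - pin user pages and program expected-receive TIDs
 * @rcd: the context
 * @fp: the qib device file
 * @ti: the TID information from userspace
 *
 * Walks the shadow TID array from the per-context (or per-subcontext)
 * cursor looking for free entries, pins and DMA-maps the user's pages,
 * and writes the resulting physical addresses into the chip's TID
 * table.  On failure, any TIDs programmed by this call are torn down
 * again before returning.
 */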
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
			  const struct qib_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct qib_devdata *dd = rcd->dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subctxt = subctxt_fp(fp);

	if (!dd->pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ret = -EFAULT;
		goto done;
	}
	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt) {
		tidcnt = dd->rcvtidcnt;
		tid = rcd->tidcursor;
		tidoff = 0;
	} else if (!subctxt) {
		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			 (dd->rcvtidcnt % rcd->subctxt_cnt);
		tidoff = dd->rcvtidcnt - tidcnt;
		ctxttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		tidoff = tidcnt * (subctxt - 1);
		ctxttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in tid_pg_list */
		qib_devinfo(dd->pcidev,
			"Process tried to allocate %u TIDs, only trying max (%u)\n",
			cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = (struct page **) rcd->tid_pg_list;
	tidlist = (u16 *) &pagep[dd->rcvtidcnt];
	pagep += tidoff;
	tidlist += tidoff;

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
				   dd->rcvtidbase +
				   ctxttid * sizeof(*tidbase));

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok((void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ret = -EFAULT;
		goto done;
	}
	ret = qib_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		/*
		 * If the pages can't be pinned (e.g. the memory is
		 * already locked), report it and give up rather than
		 * programming bogus TIDs.
		 */
		qib_devinfo(
			dd->pcidev,
			"Failed to lock addr %p, %u pages: errno %d\n",
			(void *) vaddr, cnt, -ret);
		goto done;
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		dma_addr_t daddr;

		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->pageshadow[ctxttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * Oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; clean up what we
			 * did so far.
			 */
			i--;
			ret = -ENOMEM;
			break;
		}
		ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
		if (ret)
			break;

		tidlist[i] = tid + tidoff;
		/* we "know" system pages and TID pages are same size */
		dd->pageshadow[ctxttid + tid] = pagep[i];
		dd->physshadow[ctxttid + tid] = daddr;
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->physshadow[ctxttid + tid];

		dd->f_put_tid(dd, &tidbase[tid],
			      RCVHQ_RCV_TYPE_EXPECTED, physaddr);
		/*
		 * don't check this tid in the shadow again, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
cleanup:
		/* jump here if copy out of updated info failed... */
		/* same cleanup code that's in qib_tid_free() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->pageshadow[ctxttid + tid]) {
				dma_addr_t phys;

				phys = dd->physshadow[ctxttid + tid];
				dd->physshadow[ctxttid + tid] = dd->tidinvalid;

				dd->f_put_tid(dd, &tidbase[tid],
					      RCVHQ_RCV_TYPE_EXPECTED,
					      dd->tidinvalid);
				pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
				dd->pageshadow[ctxttid + tid] = NULL;
			}
		}
		qib_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with the TIDs filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail"; if it does, we have to clean up.
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user(u64_to_user_ptr(ti->tidmap),
				 tidmap, sizeof(tidmap))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!rcd->subctxt_cnt)
			rcd->tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	return ret;
}
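
/**
 * qib_tid_free - free a context TID
 * @rcd: the context
 * @subctxt: the subcontext
 * @ti: the TID info
 *
 * Clears the chip's TID entries named in the user-supplied tidmap,
 * unmaps the DMA mappings and releases the pinned pages.  The map is
 * bounded by this context's TID range, so a process can only free
 * entries it owns.
 */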
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
			const struct qib_tid_info *ti)
{
	int ret = 0;
	u32 tid, ctxttid, cnt, limit, tidcnt;
	struct qib_devdata *dd = rcd->dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
			   sizeof(tidmap))) {
		ret = -EFAULT;
		goto done;
	}

	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt)
		tidcnt = dd->rcvtidcnt;
	else if (!subctxt) {
		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			 (dd->rcvtidcnt % rcd->subctxt_cnt);
		ctxttid += dd->rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		ctxttid += tidcnt * (subctxt - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
				   dd->rcvtidbase +
				   ctxttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	for (cnt = 0; tid < limit; tid++) {
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->pageshadow[ctxttid + tid]) {
			struct page *p;
			dma_addr_t phys;

			p = dd->pageshadow[ctxttid + tid];
			dd->pageshadow[ctxttid + tid] = NULL;
			phys = dd->physshadow[ctxttid + tid];
			dd->physshadow[ctxttid + tid] = dd->tidinvalid;

			dd->f_put_tid(dd, &tidbase[tid],
				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			qib_release_user_pages(&p, 1);
		}
	}
done:
	return ret;
}
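
/**
 * qib_set_part_key - set a partition key
 * @rcd: the context
 * @key: the key
 *
 * Adds a pkey for the context, on top of the default and any keys
 * other contexts already hold.  The port-wide table is reference
 * counted, so the same key can be shared; a key that matches an
 * existing entry in the low 15 bits but differs in the membership bit
 * is rejected.
 */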
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
	struct qib_pportdata *ppd = rcd->ppd;
	int i, pidx = -1;
	bool any = false;
	u16 lkey = key & 0x7FFF;

	if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
		/* nothing to do; this key always valid */
		return 0;

	if (!lkey)
		return -EINVAL;

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it.
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i] && pidx == -1)
			pidx = i;
		if (rcd->pkeys[i] == key)
			return -EEXIST;
	}
	if (pidx == -1)
		return -EBUSY;
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any = true;
			continue;
		}
		if (ppd->pkeys[i] == key) {
			atomic_t *pkrefs = &ppd->pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				rcd->pkeys[pidx] = key;
				return 0;
			}
			/*
			 * lost race, decrement count, catch below
			 */
			atomic_dec(pkrefs);
			any = true;
		}
		if ((ppd->pkeys[i] & 0x7FFF) == lkey)
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			return -EEXIST;
	}
	if (!any)
		return -EBUSY;
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			rcd->pkeys[pidx] = key;
			ppd->pkeys[i] = key;
			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
			return 0;
		}
	}
	return -EBUSY;
}
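
/**
 * qib_manage_rcvq - manage a context's receive queue
 * @rcd: the context
 * @subctxt: the subcontext
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in graceful
 * shutdown.  start_stop == 1 re-enables it.  Only the master of a
 * shared context may do this; subcontext callers are a no-op.
 */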
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
			   int start_stop)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;

	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy on return from the system call.  The chip always
		 * resets its tail register back to 0 on a transition
		 * from disabled to enabled.
		 */
		if (rcd->rcvhdrtail_kvaddr)
			qib_clear_rcvhdrtail(rcd);
		rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
	} else
		rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);

bail:
	return 0;
}

static void qib_clean_part_key(struct qib_ctxtdata *rcd,
			       struct qib_devdata *dd)
{
	int i, j, pchanged = 0;
	struct qib_pportdata *ppd = rcd->ppd;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i])
			continue;
		for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
			/* check for match independent of the Full Member bit */
			if ((ppd->pkeys[j] & 0x7fff) !=
			    (rcd->pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
				ppd->pkeys[j] = 0;
				pchanged++;
			}
			break;
		}
		rcd->pkeys[i] = 0;
	}
	if (pchanged)
		(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
}
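
/* common code for the mappings on dma_alloc_coherent mem */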
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
			unsigned len, void *kvaddr, u32 write_ok, char *what)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		qib_devinfo(dd->pcidev,
			"FAIL on %s: len %lx > %x\n", what,
			vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	/*
	 * shared context user code requires rcvhdrq mapped r/w, others
	 * only allowed readonly mapping.
	 */
	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				"%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		qib_devinfo(dd->pcidev,
			"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
			what, rcd->ctxt, pfn, len, ret);
bail:
	return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	unsigned long sz;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their ctxt
	 * in the chip.  Header-suppression-capable chips expose two pages.
	 */
	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
	if ((vma->vm_end - vma->vm_start) > sz) {
		qib_devinfo(dd->pcidev,
			"FAIL mmap userreg: reqlen %lx > PAGE\n",
			vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct qib_devdata *dd,
			struct qib_ctxtdata *rcd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible; unfortunately, x86 doesn't allow
	 * for this in hardware, but we still prevent users from asking
	 * for it.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
		qib_devinfo(dd->pcidev,
			"FAIL mmap piobufs: reqlen %lx > PAGE\n",
			vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->physaddr + piobufs;

#if defined(__powerpc__)
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for
	 * when not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	/* We used PAT if wc_cookie == 0 */
	if (!dd->wc_cookie)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = rcd->rcvegrbuf_size;
	total_size = rcd->rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		qib_devinfo(dd->pcidev,
			"FAIL on egr bufs: reqlen %lx > actual %lx\n",
			vma->vm_end - vma->vm_start,
			(unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		qib_devinfo(dd->pcidev,
			"Can't map eager buffers as writable (flags=%lx)\n",
			vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}
	/* don't allow them to later change to writable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}
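
/*
 * qib_file_vma_fault - handle a VMA page fault.
 *
 * The subcontext shared memory (uregbase, rcvhdrq and eager buffer
 * copies) is vmalloc'ed, so it is faulted in page by page rather than
 * remapped up front.
 */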
static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct qib_file_vm_ops = {
	.fault = qib_file_vma_fault,
};

static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct qib_ctxtdata *rcd, unsigned subctxt)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned subctxt_cnt;
	unsigned long len;
	void *addr;
	size_t size;
	int ret = 0;

	subctxt_cnt = rcd->subctxt_cnt;
	size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;

	/*
	 * Each process has all the subctxt uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
		addr = rcd->subctxt_uregbase;
		size = PAGE_SIZE * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
		addr = rcd->subctxt_rcvhdr_base;
		size = rcd->rcvhdrq_size * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
		addr = rcd->subctxt_rcvegrbuf;
		size *= subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
					PAGE_SIZE * subctxt)) {
		addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
					rcd->rcvhdrq_size * subctxt)) {
		addr = rcd->subctxt_rcvhdr_base +
			rcd->rcvhdrq_size * subctxt;
		size = rcd->rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
		addr = rcd->user_event_mask;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
					size * subctxt)) {
		addr = rcd->subctxt_rcvegrbuf + size * subctxt;

		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				"Can't map eager buffers as writable (flags=%lx)\n",
				vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else
		goto bail;
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &qib_file_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	ret = 1;

bail:
	return ret;
}
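
/**
 * qib_mmapf - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user
 * code for the rcvhdr queue, egr buffers, and the per-context user
 * regs and pio buffers in the chip.
 */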
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret, match = 1;

	rcd = ctxt_fp(fp);
	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto bail;
	}
	dd = rcd->dd;

	/*
	 * The mmap offset is the token returned by qib_get_base_info():
	 * for shared-context structures it's a kernel vmalloc address,
	 * for everything else it's a physical address.  We only do one
	 * mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Check for kernel virtual addresses first; anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!rcd->subctxt_cnt) {
		/* ctxt is not shared */
		piocnt = rcd->piocnt;
		piobufs = rcd->piobufs;
	} else if (!subctxt_fp(fp)) {
		/* caller is the master */
		piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
			 (rcd->piocnt % rcd->subctxt_cnt);
		piobufs = rcd->piobufs +
			dd->palign * (rcd->piocnt - piocnt);
	} else {
		unsigned slave = subctxt_fp(fp) - 1;

		/* caller is a slave */
		piocnt = rcd->piocnt / rcd->subctxt_cnt;
		piobufs = rcd->piobufs + dd->palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
	else if (pgaddr == dd->pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
				   (void *) dd->pioavailregs_dma, 0,
				   "pioavail registers");
	else if (pgaddr == rcd->rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, rcd);
	else if (pgaddr == (u64) rcd->rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; multiple pages, contiguous
		 * from an i/o perspective.  Shared contexts need
		 * it mapped r/w, so we allow writing.
		 */
		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
				   rcd->rcvhdrq, 1, "rcvhdrq");
	else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
				   rcd->rcvhdrtail_kvaddr, 0,
				   "rcvhdrq tail");
	else
		match = 0;
	if (!match)
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		qib_devinfo(dd->pcidev,
			"mmap Failure %d: off %llx len %lx\n",
			-ret, (unsigned long long)pgaddr,
			vma->vm_end - vma->vm_start);
bail:
	return ret;
}

static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
				struct file *fp,
				struct poll_table_struct *pt)
{
	struct qib_devdata *dd = rcd->dd;
	__poll_t pollflag;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (rcd->urgent != rcd->urgent_poll) {
		pollflag = EPOLLIN | EPOLLRDNORM;
		rcd->urgent_poll = rcd->urgent;
	} else {
		pollflag = 0;
		set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
			      struct file *fp,
			      struct poll_table_struct *pt)
{
	struct qib_devdata *dd = rcd->dd;
	__poll_t pollflag;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (dd->f_hdrqempty(rcd)) {
		set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
		pollflag = 0;
	} else
		pollflag = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct qib_ctxtdata *rcd;
	__poll_t pollflag;

	rcd = ctxt_fp(fp);
	if (!rcd)
		pollflag = EPOLLERR;
	else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
		pollflag = qib_poll_urgent(rcd, fp, pt);
	else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
		pollflag = qib_poll_next(rcd, fp, pt);
	else
		pollflag = EPOLLERR;

	return pollflag;
}

static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
	struct qib_filedata *fd = fp->private_data;
	const unsigned int weight = current->nr_cpus_allowed;
	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	int local_cpu;

	/*
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it on the local NUMA node.
	 */
	if ((weight >= qib_cpulist_count) &&
	    (cpumask_weight(local_mask) <= qib_cpulist_count)) {
		for_each_cpu(local_cpu, local_mask)
			if (!test_and_set_bit(local_cpu, qib_cpulist)) {
				fd->rec_cpu_num = local_cpu;
				return;
			}
	}

	/*
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it, as a rendezvous for all
	 * users of the driver.  If they don't actually later
	 * set affinity to this cpu, or set it to some other cpu,
	 * it just means that sooner or later we don't recommend
	 * a cpu, and let the scheduler do its best.
	 */
	if (weight >= qib_cpulist_count) {
		int cpu;

		cpu = find_first_zero_bit(qib_cpulist,
					  qib_cpulist_count);
		if (cpu == qib_cpulist_count)
			qib_dev_err(dd,
				"no cpus avail for affinity PID %u\n",
				current->pid);
		else {
			__set_bit(cpu, qib_cpulist);
			fd->rec_cpu_num = cpu;
		}
	}
}
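
/*
 * Check that userspace and driver are compatible for subcontexts.
 */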
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (QIB_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (QIB_USER_SWMAJOR == 1) {
		switch (QIB_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subctxt implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* otherwise require the user minor not be newer */
			return user_swminor <= QIB_USER_SWMINOR;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}

static int init_subctxts(struct qib_devdata *dd,
			 struct qib_ctxtdata *rcd,
			 const struct qib_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subctxts;
	size_t size;

	/*
	 * If the user is requesting zero subctxts,
	 * skip the subctxt allocation.
	 */
	if (uinfo->spu_subctxt_cnt <= 0)
		goto bail;
	num_subctxts = uinfo->spu_subctxt_cnt;

	/* Check for subctxt compatibility */
	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
				     uinfo->spu_userversion & 0xffff)) {
		qib_devinfo(dd->pcidev,
			"Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
			(int) (uinfo->spu_userversion >> 16),
			(int) (uinfo->spu_userversion & 0xffff),
			QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
		goto bail;
	}
	if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
		ret = -EINVAL;
		goto bail;
	}

	rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
	if (!rcd->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: rcd->rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subctxts;
	rcd->subctxt_rcvhdr_base = vmalloc_user(size);
	if (!rcd->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
					      rcd->rcvegrbuf_size *
					      num_subctxts);
	if (!rcd->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
	rcd->subctxt_id = uinfo->spu_subctxt_id;
	rcd->active_slaves = 1;
	rcd->redirect_seq_cnt = 1;
	set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
	goto bail;

bail_rhdr:
	vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
	vfree(rcd->subctxt_uregbase);
	rcd->subctxt_uregbase = NULL;
bail:
	return ret;
}

static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
		      struct file *fp, const struct qib_user_info *uinfo)
{
	struct qib_filedata *fd = fp->private_data;
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	void *ptmp = NULL;
	int ret;
	int numa_id;

	assign_ctxt_affinity(fp, dd);

	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
		cpu_to_node(fd->rec_cpu_num) :
		numa_node_id()) : dd->assigned_node_id;

	rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);

	/*
	 * Allocate memory for use in qib_tid_update() at open to
	 * reduce cost of expected send setup per message segment
	 */
	if (rcd)
		ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
			       dd->rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);

	if (!rcd || !ptmp) {
		qib_dev_err(dd,
			"Unable to allocate ctxtdata memory, failing open\n");
		ret = -ENOMEM;
		goto bailerr;
	}
	rcd->userversion = uinfo->spu_userversion;
	ret = init_subctxts(dd, rcd, uinfo);
	if (ret)
		goto bailerr;
	rcd->tid_pg_list = ptmp;
	rcd->pid = current->pid;
	init_waitqueue_head(&dd->rcd[ctxt]->wait);
	strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
	ctxt_fp(fp) = rcd;
	qib_stats.sps_ctxts++;
	dd->freectxts--;
	ret = 0;
	goto bail;

bailerr:
	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	dd->rcd[ctxt] = NULL;
	kfree(rcd);
	kfree(ptmp);
bail:
	return ret;
}

static inline int usable(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
		(ppd->lflags & QIBL_LINKACTIVE);
}
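
/*
 * Select a context on the given device, either using the requested
 * port, or, if no port was requested, a usable port picked from the
 * context number.
 */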
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
			    const struct qib_user_info *uinfo)
{
	struct qib_pportdata *ppd = NULL;
	int ret, ctxt;

	if (port) {
		if (!usable(dd->pport + port - 1)) {
			ret = -ENETDOWN;
			goto done;
		} else
			ppd = dd->pport + port - 1;
	}
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
	     ctxt++)
		;
	if (ctxt == dd->cfgctxts) {
		ret = -EBUSY;
		goto done;
	}
	if (!ppd) {
		u32 pidx = ctxt % dd->num_pports;

		if (usable(dd->pport + pidx))
			ppd = dd->pport + pidx;
		else {
			for (pidx = 0; pidx < dd->num_pports && !ppd;
			     pidx++)
				if (usable(dd->pport + pidx))
					ppd = dd->pport + pidx;
		}
	}
	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
	return ret;
}

static int find_free_ctxt(int unit, struct file *fp,
			  const struct qib_user_info *uinfo)
{
	struct qib_devdata *dd = qib_lookup(unit);
	int ret;

	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
		ret = -ENODEV;
	else
		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);

	return ret;
}

static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
		      unsigned alg)
{
	struct qib_devdata *udd = NULL;
	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
	u32 port = uinfo->spu_port, ctxt;

	devmax = qib_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (nup == 0) {
		ret = -ENETDOWN;
		goto done;
	}

	if (alg == QIB_PORT_ALG_ACROSS) {
		unsigned inuse = ~0U;

		/* find device (with ACTIVE ports) with fewest ctxts in use */
		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);
			unsigned cused = 0, cfree = 0, pusable = 0;

			if (!dd)
				continue;
			if (port && port <= dd->num_pports &&
			    usable(dd->pport + port - 1))
				pusable = 1;
			else
				for (i = 0; i < dd->num_pports; i++)
					if (usable(dd->pport + i))
						pusable++;
			if (!pusable)
				continue;
			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
			     ctxt++)
				if (dd->rcd[ctxt])
					cused++;
				else
					cfree++;
			if (cfree && cused < inuse) {
				udd = dd;
				inuse = cused;
			}
		}
		if (udd) {
			ret = choose_port_ctxt(fp, udd, port, uinfo);
			goto done;
		}
	} else {
		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);

			if (dd) {
				ret = choose_port_ctxt(fp, dd, port, uinfo);
				if (!ret)
					goto done;
				if (ret == -EBUSY)
					dusable++;
			}
		}
	}
	ret = dusable ? -EBUSY : -ENETDOWN;

done:
	return ret;
}

static int find_shared_ctxt(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = qib_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);

		/* device portion of usable() */
		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!rcd || !rcd->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (rcd->subctxt_id != uinfo->spu_subctxt_id)
				continue;
			/* Verify the sharing process matches the master */
			if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
			    rcd->userversion != uinfo->spu_userversion ||
			    rcd->cnt >= rcd->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			ctxt_fp(fp) = rcd;
			subctxt_fp(fp) = rcd->cnt++;
			rcd->subpid[subctxt_fp(fp)] = current->pid;
			tidcursor_fp(fp) = 0;
			rcd->active_slaves |= 1 << subctxt_fp(fp);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int qib_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in qib_assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
	if (fp->private_data)
		((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}

static int find_hca(unsigned int cpu, int *unit)
{
	int ret = 0, devmax, npresent, nup, ndev;

	*unit = -1;

	devmax = qib_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (!nup) {
		ret = -ENETDOWN;
		goto done;
	}
	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);

		if (dd) {
			if (pcibus_to_node(dd->pcidev->bus) < 0) {
				ret = -EINVAL;
				goto done;
			}
			if (cpu_to_node(cpu) ==
			    pcibus_to_node(dd->pcidev->bus)) {
				*unit = ndev;
				goto done;
			}
		}
	}
done:
	return ret;
}

static int do_qib_user_sdma_queue_create(struct file *fp)
{
	struct qib_filedata *fd = fp->private_data;
	struct qib_ctxtdata *rcd = fd->rcd;
	struct qib_devdata *dd = rcd->dd;

	if (dd->flags & QIB_HAS_SEND_DMA) {

		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
						    dd->unit,
						    rcd->ctxt,
						    fd->subctxt);
		if (!fd->pq)
			return -ENOMEM;
	}

	return 0;
}
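
/*
 * Get ctxt early, so can set affinity prior to memory allocation.
 */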
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;

	/* Check to be sure we haven't already initialized this file */
	if (ctxt_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != QIB_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;

	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
		alg = uinfo->spu_port_alg;

	mutex_lock(&qib_mutex);

	if (qib_compatible_subctxts(swmajor, swminor) &&
	    uinfo->spu_subctxt_cnt) {
		ret = find_shared_ctxt(fp, uinfo);
		if (ret > 0) {
			ret = do_qib_user_sdma_queue_create(fp);
			if (!ret)
				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
			goto done_ok;
		}
	}

	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
	if (i_minor)
		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
	else {
		int unit;
		const unsigned int cpu = cpumask_first(current->cpus_ptr);
		const unsigned int weight = current->nr_cpus_allowed;

		if (weight == 1 && !test_bit(cpu, qib_cpulist))
			if (!find_hca(cpu, &unit) && unit >= 0)
				if (!find_free_ctxt(unit, fp, uinfo)) {
					ret = 0;
					goto done_chk_sdma;
				}
		ret = get_a_ctxt(fp, uinfo, alg);
	}

done_chk_sdma:
	if (!ret)
		ret = do_qib_user_sdma_queue_create(fp);
done_ok:
	mutex_unlock(&qib_mutex);

done:
	return ret;
}
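
/*
 * qib_do_user_init - finish user-context setup: carve out this
 * context's share of PIO buffers, allocate the receive header queue
 * and eager buffers, and enable receive.  Subcontext slaves just wait
 * here until the master has finished initialization.
 */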
static int qib_do_user_init(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int ret;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_devdata *dd;
	unsigned uctxt;

	/* Slaves just wait for the master to finish initializing */
	if (subctxt_fp(fp)) {
		ret = wait_event_interruptible(rcd->wait,
			!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
		goto bail;
	}

	dd = rcd->dd;

	/* some ctxts may get extra buffers, calculate that here */
	uctxt = rcd->ctxt - dd->first_user_ctxt;
	if (uctxt < dd->ctxts_extrabuf) {
		rcd->piocnt = dd->pbufsctxt + 1;
		rcd->pio_base = rcd->piocnt * uctxt;
	} else {
		rcd->piocnt = dd->pbufsctxt;
		rcd->pio_base = rcd->piocnt * uctxt +
			dd->ctxts_extrabuf;
	}

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.  Can't use piobufbase directly, because it has
	 * both 2K and 4K buffer base values.  So check and handle.
	 */
	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
		if (rcd->pio_base >= dd->piobcnt2k) {
			qib_dev_err(dd,
				    "%u:ctxt%u: no 2KB buffers available\n",
				    dd->unit, rcd->ctxt);
			ret = -ENOBUFS;
			goto bail;
		}
		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
			    rcd->ctxt, rcd->piocnt);
	}

	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_USER, rcd);
	/*
	 * Blip the available-buffer update so the new user context
	 * starts with a freshly updated shadow of the PIO-avail
	 * registers.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.
	 */
	ret = qib_create_rcvhdrq(dd, rcd);
	if (!ret)
		ret = qib_setup_eagerbufs(rcd);
	if (ret)
		goto bail_pio;

	rcd->tidcursor = 0;

	/* initialize poll variables... */
	rcd->urgent = 0;
	rcd->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.  For chips that DMA the
	 * tail register to memory, ensure the in-memory copy is 0
	 * before enabling: the chip resets head and tail to 0 on the
	 * transition to enabled, and this keeps the software copy in
	 * step without waiting for the DMA update.
	 */
	if (rcd->rcvhdrtail_kvaddr)
		qib_clear_rcvhdrtail(rcd);

	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
		      rcd->ctxt);

	/* Notify any waiting slaves */
	if (rcd->subctxt_cnt) {
		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
		wake_up(&rcd->wait);
	}
	return 0;

bail_pio:
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_KERN, rcd);
bail:
	return ret;
}
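
/*
 * Unlock any expected-receive pages the context still had pinned.
 * The chip TID entries themselves are cleared in bulk by the caller
 * via f_clear_tids; here we only unmap and release the pages.
 */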
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
	int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;

	for (i = ctxt_tidbase; i < maxtid; i++) {
		struct page *p = dd->pageshadow[i];
		dma_addr_t phys;

		if (!p)
			continue;

		phys = dd->physshadow[i];
		dd->physshadow[i] = dd->tidinvalid;
		dd->pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		qib_release_user_pages(&p, 1);
		cnt++;
	}
}

static int qib_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct qib_filedata *fd;
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	unsigned long flags;
	unsigned ctxt;

	mutex_lock(&qib_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	rcd = fd->rcd;
	if (!rcd) {
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	dd = rcd->dd;

	/* ensure all pio buffer writes in progress are flushed */
	qib_flush_wc();

	/* drain user sdma queue */
	if (fd->pq) {
		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
		qib_user_sdma_queue_destroy(fd->pq);
	}

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	if (--rcd->cnt) {
		/*
		 * The context is still in use by other subcontexts;
		 * just clear this subcontext's slot and return.
		 */
		rcd->active_slaves &= ~(1 << fd->subctxt);
		rcd->subpid[fd->subctxt] = 0;
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	/* make sure no interrupt handler can still find this ctxt */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	ctxt = rcd->ctxt;
	dd->rcd[ctxt] = NULL;
	rcd->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (rcd->rcvwait_to || rcd->piowait_to ||
	    rcd->rcvnowait || rcd->pionowait) {
		rcd->rcvwait_to = 0;
		rcd->piowait_to = 0;
		rcd->rcvnowait = 0;
		rcd->pionowait = 0;
	}
	if (rcd->flag)
		rcd->flag = 0;

	if (dd->kregbase) {
		/* atomically clear receive enable ctxt and intr avail */
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
				  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);

		/* clean up the pkeys for this ctxt user */
		qib_clean_part_key(rcd, dd);
		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
		qib_chg_pioavailkernel(dd, rcd->pio_base,
				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);

		dd->f_clear_tids(dd, rcd);

		if (dd->pageshadow)
			unlock_expected_tids(rcd);
		qib_stats.sps_ctxts--;
		dd->freectxts++;
	}

	mutex_unlock(&qib_mutex);
	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}

static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{
	struct qib_ctxt_info info;
	int ret;
	size_t sz;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_filedata *fd;

	fd = fp->private_data;

	info.num_active = qib_count_active_units();
	info.unit = rcd->dd->unit;
	info.port = rcd->ppd->port;
	info.ctxt = rcd->ctxt;
	info.subctxt = subctxt_fp(fp);
	/* Number of user ctxts available for this device. */
	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
	info.num_subctxts = rcd->subctxt_cnt;
	info.rec_cpu = fd->rec_cpu_num;
	sz = sizeof(info);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
				 u32 __user *inflightp)
{
	const u32 val = qib_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

static int qib_sdma_get_complete(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 u32 __user *completep)
{
	u32 val;
	int err;

	if (!pq)
		return -EINVAL;

	err = qib_user_sdma_make_progress(ppd, pq);
	if (err < 0)
		return err;

	val = qib_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
	int ret = 0;

	if (!usable(rcd->ppd)) {
		int i;
		/*
		 * if link is down, or otherwise not usable, delay
		 * the caller up to 30 seconds, so we don't thrash
		 * in trying to get the chip back to ACTIVE, and
		 * set flag so they make the call again.
		 */
		if (rcd->user_event_mask) {
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
			msleep(100);
		ret = -ENETDOWN;
	}
	return ret;
}
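
/*
 * Mark an event in the user event mask of an open user context on
 * this port (master slot first, then any subcontexts), so the owning
 * process will service it.  Returns 1 if a context was flagged.
 */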
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
	     ctxt++) {
		rcd = ppd->dd->rcd[ctxt];
		if (!rcd)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, &rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(evtbit, &rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);

	return ret;
}
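
/*
 * clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions, just atomically clear.
 */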
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{
	int ret = 0, i;

	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
			(void)qib_disarm_piobufs_ifneeded(rcd);
			ret = disarm_req_delay(rcd);
		} else
			clear_bit(i, &rcd->user_event_mask[subctxt]);
	}
	return ret;
}

static ssize_t qib_write(struct file *fp, const char __user *data,
			 size_t count, loff_t *off)
{
	const struct qib_cmd __user *ucmd;
	struct qib_ctxtdata *rcd;
	const void __user *src;
	size_t consumed, copy = 0;
	struct qib_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (!ib_safe_file_access(fp)) {
		pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct qib_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
	case QIB_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;

	case QIB_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;

	case QIB_CMD_CTXT_INFO:
		copy = sizeof(cmd.cmd.ctxt_info);
		dest = &cmd.cmd.ctxt_info;
		src = &ucmd->cmd.ctxt_info;
		break;

	case QIB_CMD_TID_UPDATE:
	case QIB_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;

	case QIB_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;

	case QIB_CMD_DISARM_BUFS:
	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;

	case QIB_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;

	case QIB_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;

	case QIB_CMD_ACK_EVENT:
		copy = sizeof(cmd.cmd.event_mask);
		dest = &cmd.cmd.event_mask;
		src = &ucmd->cmd.event_mask;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}
		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	rcd = ctxt_fp(fp);
	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
		if (rcd) {
			ret = -EINVAL;
			goto bail;
		}

		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;

	case QIB_CMD_USER_INIT:
		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = qib_get_base_info(fp, u64_to_user_ptr(
					  cmd.cmd.user_info.spu_base_info),
					cmd.cmd.user_info.spu_base_info_size);
		break;

	case QIB_CMD_RECV_CTRL:
		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
		break;

	case QIB_CMD_CTXT_INFO:
		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
				    (unsigned long) cmd.cmd.ctxt_info);
		break;

	case QIB_CMD_TID_UPDATE:
		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
		break;

	case QIB_CMD_TID_FREE:
		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
		break;

	case QIB_CMD_SET_PART_KEY:
		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
		break;

	case QIB_CMD_DISARM_BUFS:
		(void)qib_disarm_piobufs_ifneeded(rcd);
		ret = disarm_req_delay(rcd);
		break;

	case QIB_CMD_PIOAVAILUPD:
		qib_force_pio_avail_update(rcd->dd);
		break;

	case QIB_CMD_POLL_TYPE:
		rcd->poll_type = cmd.cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_inflight);
		break;

	case QIB_CMD_SDMA_COMPLETE:
		ret = qib_sdma_get_complete(rcd->ppd,
					    user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_complete);
		break;

	case QIB_CMD_ACK_EVENT:
		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
					 cmd.cmd.event_mask);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}

static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct qib_filedata *fp = iocb->ki_filp->private_data;
	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
	struct qib_user_sdma_queue *pq = fp->pq;

	if (!iter_is_iovec(from) || !from->nr_segs || !pq)
		return -EINVAL;

	return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
}

static struct class *qib_class;
static dev_t qib_dev;

int qib_cdev_init(int minor, const char *name,
		  const struct file_operations *fops,
		  struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
	struct cdev *cdev;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		pr_err("Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		pr_err("Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(qib_class, NULL, dev, NULL, "%s", name);
	if (!IS_ERR(device))
		goto done;
	ret = PTR_ERR(device);
	device = NULL;
	pr_err("Could not create device for minor %d, %s (err %d)\n",
	       minor, name, -ret);
err_cdev:
	cdev_del(cdev);
	cdev = NULL;
done:
	*cdevp = cdev;
	*devp = device;
	return ret;
}

void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{
	struct device *device = *devp;

	if (device) {
		device_unregister(device);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

int __init qib_dev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
	if (ret < 0) {
		pr_err("Could not allocate chrdev region (err %d)\n", -ret);
		goto done;
	}

	qib_class = class_create(THIS_MODULE, "ipath");
	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
		pr_err("Could not create device class (err %d)\n", -ret);
		unregister_chrdev_region(qib_dev, QIB_NMINORS);
	}

done:
	return ret;
}

void qib_dev_cleanup(void)
{
	if (qib_class) {
		class_destroy(qib_class);
		qib_class = NULL;
	}

	unregister_chrdev_region(qib_dev, QIB_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);

static void qib_user_remove(struct qib_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int qib_user_add(struct qib_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
				    &wildcard_cdev, &wildcard_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath%d", dd->unit);
	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
			    &dd->user_cdev, &dd->user_device);
	if (ret)
		qib_user_remove(dd);
done:
	return ret;
}
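
/*
 * Create per-unit files in /dev
 */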
int qib_device_create(struct qib_devdata *dd)
{
	int r, ret;

	r = qib_user_add(dd);
	ret = qib_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}
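
/*
 * Remove per-unit files in /dev
 */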
void qib_device_remove(struct qib_devdata *dd)
{
	qib_user_remove(dd);
	qib_diag_remove(dd);
}