/*
 * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * Minimum number of buffers we want to have available per user
 * context, beyond what the driver itself uses.
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

/*
 * Number of contexts we are configured to use (to allow for more
 * PIO buffers per context, etc.).  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
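
/*
 * Example (illustrative only, not from the driver source): these
 * parameters are set at module load time, e.g.
 *
 *   modprobe ib_qib cfgctxts=16 krcvqs=2 numa_aware=1
 *
 * The values shown are hypothetical; see each parameter's description
 * for its meaning and valid range.
 */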

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");

/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

static void verify_interrupt(struct timer_list *);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

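/*
 * Set dd->cfgctxts, the number of contexts actually used: honor the
 * cfgctxts module parameter when it is in range, otherwise derive a
 * default from the chip context count and online CPUs.
 */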
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

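/*
 * Common code for creating the receive context array.
 */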
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

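	/*
	 * Allocate the full ctxtcnt array rather than just cfgctxts,
	 * because cleanup iterates across all possible contexts.
	 */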
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;

	/* create (one or more) kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

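/*
 * Common code for user and kernel context creation.
 */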
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

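		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.
		 */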
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

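/*
 * Common code for initializing the physical port structure.
 */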
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
		goto bail;

	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);

	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries/IB_CCT_ENTRIES;

	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries)
		goto bail;

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries)
		goto bail_1;

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow)
		goto bail_2;

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow)
		goto bail_3;

	return 0;

bail_3:
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
bail_2:
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
bail_1:
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;
bail:
	/* Congestion Control Agent stays disabled for this port */
	if (!qib_cc_table_size)
		return 0;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
		/* report the bad requested size before zeroing it */
		qib_dev_err(dd,
			"Congestion Control table size %d less than minimum %d for port %d\n",
			qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
		qib_cc_table_size = 0;
	}

	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
		port);
	return 0;
}

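/*
 * Allocate the in-memory shadow of the PIO-available registers, which
 * the chip DMAs to, plus the per-unit and per-port status words and
 * the freeze-message buffer, all of which share the same page.
 */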
static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * The device status and port status words follow the pioavail
	 * registers in the same page, cache-line aligned.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards binary compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

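/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * Allocate the shadow TID array, so we can unlock and unmap previously
 * programmed expected-receive pages on context teardown.  Allocation
 * failure here is not fatal; the shadow arrays are simply left NULL.
 */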
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages)
		goto bail;

	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs)
		goto bail_free;

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

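/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */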
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd,
			"Driver only handles version %d, chip swversion is %d (%llx), failing\n",
			QIB_CHIP_SWVERSION,
			(int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
	return ret;
}
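/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset
 * failed)
 */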
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
				  QIB_RCVCTRL_INTRAVAIL_DIS |
				  QIB_RCVCTRL_TAILUPD_DIS, -1);
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/* enable PIO send */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(struct timer_list *t)
{
	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
	u64 int_counter;

	if (!dd)
		return; /* being torn down */

	/*
	 * If no interrupts have arrived since the baseline count was
	 * taken, try falling back to an alternate interrupt mode.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty; buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If not all sendbufs are used, add the one to each of the lower
	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;

	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}
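/**
 * qib_create_workqueues - create the per-port workqueues
 * @dd: the qlogic_ib device
 */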
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			char wq_name[8];

			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
				dd->unit, pidx);
			ppd->qib_wq = alloc_ordered_workqueue(wq_name,
				WQ_MEM_RECLAIM);
			if (!ppd->qib_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_ordered_workqueue failed for port %d\n",
		pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
	}
	return -ENOMEM;
}

static void qib_free_pportdata(struct qib_pportdata *ppd)
{
	free_percpu(ppd->ibport_data.pmastats);
	free_percpu(ppd->ibport_data.rvp.rc_acks);
	free_percpu(ppd->ibport_data.rvp.rc_qacks);
	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
	ppd->ibport_data.pmastats = NULL;
}
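/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both on first detect and after resets.
 *
 * Returns 0 on success, or an error code (which will also set
 * dd->flags to indicate the device is not usable, as appropriate).
 */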
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail)
			qib_dev_err(dd,
				"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;

		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				 "Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			timer_setup(&ppd->hol_timer, qib_hol_event, 0);
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}
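/*
 * Weak no-op defaults for the write-combining hooks, used when no
 * architecture-specific implementation is linked in; qib_enable_wc()
 * then reports that write combining is unsupported.
 */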
int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}

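/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */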
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.function)
		del_timer_sync(&dd->stats_timer);
	if (dd->intrchk_timer.function)
		del_timer_sync(&dd->intrchk_timer);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.function)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.function)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}

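/**
 * qib_shutdown_device - shut the device down completely
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by qib_init(dd, 1)
 */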
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);

		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
		qib_free_pportdata(ppd);
	}
}

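/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * Free up any allocated data for a context.
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */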
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}

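/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test repeatedly copies the same 1KB buffer to a PIO send buffer
 * for roughly 5 msec and complains if the achieved bandwidth is below
 * about 1 GiB/sec.
 */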
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			 "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr)
		goto done;

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.   Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over 1 GB/sec, should catch most problems */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

u64 qib_int_counter(struct qib_devdata *dd)
{
	int cpu;
	u64 int_counter = 0;

	for_each_possible_cpu(cpu)
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
	return int_counter;
}

u64 qib_sps_ints(void)
{
	unsigned long flags;
	struct qib_devdata *dd;
	u64 sps_ints = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		sps_ints += qib_int_counter(dd);
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return sps_ints;
}

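/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */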
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret, nports;

	/* extra is port count * sizeof(struct qib_pportdata) */
	nports = extra / sizeof(struct qib_pportdata);
	dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						    nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dd->list);

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&qib_devs_lock, flags);

	ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
	if (ret >= 0) {
		dd->unit = ret;
		list_add(&dd->list, &qib_dev_list);
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		qib_early_err(&pdev->dev,
			      "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();

		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
				      sizeof(long), GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
	}
#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
#endif
	return dd;
bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}
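/*
 * Disable the device after a catastrophic or late-init error: take the
 * IB links down and turn off the LEDs, clear QIB_INITTED, and mark the
 * status words so user programs see the unit as unusable until reset.
 */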
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static const struct pci_device_id qib_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

static struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = qib_remove_one,
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call = qib_notify_dca,
	.next = NULL,
	.priority = 0
};

static int qib_notify_dca_device(struct device *device, void *data)
{
	struct qib_devdata *dd = dev_get_drvdata(device);
	unsigned long event = *(unsigned long *)data;

	return dd->f_notify_dca(dd, event);
}

static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
					  void *p)
{
	int rval;

	rval = driver_for_each_device(&qib_driver.driver, NULL,
				      &event, qib_notify_dca_device);
	return rval ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif
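/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */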
static int __init qib_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_register_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_init();
#endif
	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		pr_err("Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_dev:
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif
	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qib_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qib_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		pr_err(
			"Unable to cleanup counter filesystem: error %d\n",
			-ret);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&qib_driver);
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qib_ib_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

		spin_lock(&dd->pport[pidx].cc_shadow_lock);

		kfree(dd->pport[pidx].congestion_entries);
		dd->pport[pidx].congestion_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries);
		dd->pport[pidx].ccti_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries_shadow);
		dd->pport[pidx].ccti_entries_shadow = NULL;
		kfree(dd->pport[pidx].congestion_entries_shadow);
		dd->pport[pidx].congestion_entries_shadow = NULL;

		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
	}

	qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
			}
		}

		dd->pageshadow = NULL;
		vfree(tmpp);
		dd->physshadow = NULL;
		vfree(tmpd);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
}
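/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */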
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev,
			"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
			ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev,
			"Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	ret = qib_create_workqueues(dd);
	if (ret)
		goto bail;

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  this should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	ret = qib_enable_wc(dd);
	if (ret) {
		qib_dev_err(dd,
			"Write combining not enabled (err %d): performance may be poor\n",
			-ret);
		ret = 0;
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

static void qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}
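/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */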
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd,
				"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd,
		"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
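/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 * @rcd: the context we are setting up
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */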
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kzalloc_node(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc_array_node(chunk,
					   sizeof(rcd->rcvegrbuf_phys[0]),
					   GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear the buffer before handing TIDs to the chip */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
					  (u64 __iomem *)
					  ((char __iomem *)
					   dd->kregbase +
					   dd->rcvegrbase),
					  RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}
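/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */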
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}