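/*
 * QLogic/Intel InfiniBand qib driver: device initialization and teardown,
 * context and receive buffer allocation, and module init/exit for the
 * QLogic IB 6120/7220/7322 HCAs.
 */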
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

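/*
 * min buffers we want to have per context, after driver
 */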
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

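/*
 * Number of contexts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */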
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");

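/*
 * If set, do only minimal diagnostic initialization: skip most chip and
 * memory setup so diags can still run against a misbehaving unit.
 */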
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

static void verify_interrupt(struct timer_list *);

DEFINE_XARRAY_FLAGS(qib_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

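/* set number of contexts we'll actually use */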
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

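/*
 * Common code for creating the receive context array.
 */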
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

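	/*
	 * Allocate the full ctxtcnt array, rather than just cfgctxts,
	 * because cleanup iterates across all possible contexts.
	 */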
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;

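	/* create (one or more) kernel contexts */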
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

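/*
 * Common code for user and kernel context creation.
 */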
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) {
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

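		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  We need memory to store
		 * those extra pointers, so figure out how many chunks and
		 * how many buffers per chunk we need here.
		 */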
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

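/*
 * Common code for initializing the physical port structure.
 */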
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
		       u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port;

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;

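	/*
	 * Congestion control table and shadow allocation; skipped (CCA
	 * disabled) when the cc_table_size module parameter is below
	 * the minimum.
	 */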
	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
		goto bail;

	ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
		IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES * IB_CC_TABLE_CAP_DEFAULT);

	ppd->cc_max_table_entries =
		ppd->cc_supported_table_entries / IB_CCT_ENTRIES;

	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
		* IB_CCT_ENTRIES;
	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries)
		goto bail;

	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries)
		goto bail_1;

	size = sizeof(struct cc_table_shadow);
	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->ccti_entries_shadow)
		goto bail_2;

	size = sizeof(struct ib_cc_congestion_setting_attr);
	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
	if (!ppd->congestion_entries_shadow)
		goto bail_3;

	return 0;

bail_3:
	kfree(ppd->ccti_entries_shadow);
	ppd->ccti_entries_shadow = NULL;
bail_2:
	kfree(ppd->congestion_entries);
	ppd->congestion_entries = NULL;
bail_1:
	kfree(ppd->ccti_entries);
	ppd->ccti_entries = NULL;
bail:
	/* cc_table_size == 0 means the user is disabling CCA on purpose */
	if (!qib_cc_table_size)
		return 0;

	if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
		qib_cc_table_size = 0;
		qib_dev_err(dd,
			"Congestion Control table size %d less than minimum %d for port %d\n",
			qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
	}

	qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
		    port);
	return 0;
}

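/*
 * Allocate the DMA'ed PIO-availability register page; the device and
 * per-port status words and the freeze message buffer share the rest
 * of the same page.
 */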
static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

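	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */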
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

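	/*
	 * Set up buffer to hold freeze and other messages, accessible to
	 * apps, following the status words.  This is per-unit, not per-port.
	 */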
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

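/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * Allocate the shadow TID array, so we can unlock any previously
 * locked user pages as needed in case of a crash, and accumulate
 * the page pointers for unmapping at driver unload time.
 */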
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(array_size(sizeof(struct page *),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!pages)
		goto bail;

	addrs = vzalloc(array_size(sizeof(dma_addr_t),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!addrs)
		goto bail_free;

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

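/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */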
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd,
			"Driver only handles version %d, chip swversion is %d (%llx), failing\n",
			QIB_CHIP_SWVERSION,
			(int)(dd->revision >>
				QLOGIC_IB_R_SOFTWARE_SHIFT) &
				QLOGIC_IB_R_SOFTWARE_MASK,
			(unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

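	/* setup time (don't start yet) to verify we got interrupt */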
	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
	return ret;
}

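/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * Quiet the chip after a reset: make sure no receives or sends are
 * active before the caller re-initializes the driver data structures.
 */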
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure the chip does no sends or receives, or tail or
	 * pioavail updates, while we re-initialize the driver's
	 * data structures.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts"; only really safe
		 * for disabling things, as done here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_TAILUPD_DIS, -1);

		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			       QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}

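/*
 * (Re)enable PIO send and kernel-context receive on the chip.
 */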
static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/* enable PIO send, and update of PIO availability on the chip */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			       QIB_SENDCTRL_AVAIL_ENB);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts are done as users open and initialize them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}

static void verify_interrupt(struct timer_list *t)
{
	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
	u64 int_counter;

	if (!dd)
		return;

	/*
	 * Count interrupts since the timer was armed; if none arrived,
	 * try the fallback interrupt mechanism before declaring the
	 * unit unusable.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}

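/*
 * Initialize the PIO buffer state: disarm and flush all send buffers,
 * compute per-context buffer accounting, and seed the pioavail shadow
 * from the DMA'ed registers.
 */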
static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Disarm all send buffers and flush any cached state, so we
	 * start from a clean slate before rebuilding the shadow copy.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If the buffers don't divide evenly among the user contexts,
	 * the remainder (ctxts_extrabuf) goes to the last context.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;

	/*
	 * Initialize the kernel's shadow copy of the PIO availability
	 * registers from the DMA'ed copy written by the chip.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/* shadow is kept in host byte order */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0;

	/* initially, mark all PIO buffers as available to the kernel */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}

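/**
 * qib_create_workqueues - create per port workqueues
 * @dd: the qlogic_ib device
 */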
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (!ppd->qib_wq) {
			char wq_name[8];

			snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
				dd->unit, pidx);
			ppd->qib_wq = alloc_ordered_workqueue(wq_name,
							      WQ_MEM_RECLAIM);
			if (!ppd->qib_wq)
				goto wq_error;
		}
	}
	return 0;
wq_error:
	pr_err("alloc_ordered_workqueue failed for port %d\n",
	       pidx + 1);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
	}
	return -ENOMEM;
}

static void qib_free_pportdata(struct qib_pportdata *ppd)
{
	free_percpu(ppd->ibport_data.pmastats);
	free_percpu(ppd->ibport_data.rvp.rc_acks);
	free_percpu(ppd->ibport_data.rvp.rc_qacks);
	free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
	ppd->ibport_data.pmastats = NULL;
}

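/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
 */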
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and eager TID
		 * buffers for each kernel context.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail)
			qib_dev_err(dd,
				"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;

		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for size of a packet buffer */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
		/*
		 * Have to initialize ibmaxlen, but ideally should not
		 * set it to anything smaller until the link is up.
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				    "Failed to bringup IB port %u\n", ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			timer_setup(&ppd->hol_timer, qib_hol_event, 0);
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}

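/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be poor.
 */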
int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}

struct qib_devdata *qib_lookup(int unit)
{
	return xa_load(&qib_dev_table, unit);
}
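/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */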
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.function)
		del_timer_sync(&dd->stats_timer);
	if (dd->intrchk_timer.function)
		del_timer_sync(&dd->intrchk_timer);
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.function)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.function) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.function)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}
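/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by qib_init(dd, 1).
 */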
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	if (dd->flags & QIB_SHUTDOWN)
		return;
	dd->flags |= QIB_SHUTDOWN;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
				   QIB_RCVCTRL_CTXT_DIS |
				   QIB_RCVCTRL_INTRAVAIL_DIS |
				   QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends, allowing any in progress
		 * to trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
				    QIB_SENDCTRL_SEND_DIS);
		/*
		 * Quiesce the serdes; we can't count on interrupts
		 * since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);

		if (ppd->qib_wq) {
			destroy_workqueue(ppd->qib_wq);
			ppd->qib_wq = NULL;
		}
		qib_free_pportdata(ppd);
	}

}
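/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * Free up any allocated data for a context.  This should not touch
 * anything that would affect a simultaneous re-allocation of context
 * data; it should never change any chip state, or global driver state.
 */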
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
#ifdef CONFIG_DEBUG_FS
	kfree(rcd->opstats);
	rcd->opstats = NULL;
#endif
	kfree(rcd);
}
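/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).
 */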
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			    "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr)
		goto done;

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over 1 GB/sec, should be limit */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			    "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
			    lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}

void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	xa_lock_irqsave(&qib_dev_table, flags);
	__xa_erase(&qib_dev_table, dd->unit);
	xa_unlock_irqrestore(&qib_dev_table, flags);

#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
	free_percpu(dd->int_counter);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}

u64 qib_int_counter(struct qib_devdata *dd)
{
	int cpu;
	u64 int_counter = 0;

	for_each_possible_cpu(cpu)
		int_counter += *per_cpu_ptr(dd->int_counter, cpu);
	return int_counter;
}

u64 qib_sps_ints(void)
{
	unsigned long index, flags;
	struct qib_devdata *dd;
	u64 sps_ints = 0;

	xa_lock_irqsave(&qib_dev_table, flags);
	xa_for_each(&qib_dev_table, index, dd) {
		sps_ints += qib_int_counter(dd);
	}
	xa_unlock_irqrestore(&qib_dev_table, flags);
	return sps_ints;
}
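/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 */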
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	struct qib_devdata *dd;
	int ret, nports;

	nports = extra / sizeof(struct qib_pportdata);
	dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
						    nports);
	if (!dd)
		return ERR_PTR(-ENOMEM);

	ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b,
			   GFP_KERNEL);
	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		goto bail;
	}
	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);

	dd->int_counter = alloc_percpu(u64);
	if (!dd->int_counter) {
		ret = -ENOMEM;
		qib_early_err(&pdev->dev,
			      "Could not allocate per-cpu int_counter\n");
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();

		qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
				      GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
	}
#ifdef CONFIG_DEBUG_FS
	qib_dbg_ibdev_init(&dd->verbs_dev);
#endif
	return dd;
bail:
	if (!list_empty(&dd->list))
		list_del_init(&dd->list);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
	return ERR_PTR(ret);
}
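/*
 * Called when we might not be usable, mostly from error-handling paths;
 * disable the IB link and mark the device status so user programs know
 * the unit is unusable until reset.
 */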
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}

static void qib_remove_one(struct pci_dev *);
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
static void qib_shutdown_one(struct pci_dev *);

#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static const struct pci_device_id qib_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

static struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = qib_remove_one,
	.shutdown = qib_shutdown_one,
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};

#ifdef CONFIG_INFINIBAND_QIB_DCA

static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call = qib_notify_dca,
	.next = NULL,
	.priority = 0
};

static int qib_notify_dca_device(struct device *device, void *data)
{
	struct qib_devdata *dd = dev_get_drvdata(device);
	unsigned long event = *(unsigned long *)data;

	return dd->f_notify_dca(dd, event);
}

static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int rval;

	rval = driver_for_each_device(&qib_driver.driver, NULL,
				      &event, qib_notify_dca_device);
	return rval ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif

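/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */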
static int __init qib_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_register_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_init();
#endif
	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		pr_err("Unable to register driver: error %d\n", -ret);
		goto bail_dev;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		pr_err("Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_dev:
#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qib_ib_init);

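/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */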
static void __exit qib_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		pr_err(
			"Unable to cleanup counter filesystem: error %d\n",
			-ret);

#ifdef CONFIG_INFINIBAND_QIB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&qib_driver);
#ifdef CONFIG_DEBUG_FS
	qib_dbg_exit();
#endif

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	WARN_ON(!xa_empty(&qib_dev_table));
	qib_dev_cleanup();
}

module_exit(qib_ib_cleanup);

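/* this can only be called after a successful initialization */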
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

		spin_lock(&dd->pport[pidx].cc_shadow_lock);

		kfree(dd->pport[pidx].congestion_entries);
		dd->pport[pidx].congestion_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries);
		dd->pport[pidx].ccti_entries = NULL;
		kfree(dd->pport[pidx].ccti_entries_shadow);
		dd->pport[pidx].ccti_entries_shadow = NULL;
		kfree(dd->pport[pidx].congestion_entries_shadow);
		dd->pport[pidx].congestion_entries_shadow = NULL;

		spin_unlock(&dd->pport[pidx].cc_shadow_lock);
	}

	qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
			}
		}

		dd->pageshadow = NULL;
		vfree(tmpp);
		dd->physshadow = NULL;
		vfree(tmpd);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
	 * We acquire the lock to be really paranoid that rcd isn't being
	 * accessed from some interrupt-related code (that should not happen,
	 * but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
}
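/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */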
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}

static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev,
			"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
			ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev,
			"Failing on unknown Intel deviceid 0x%x\n",
			ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	ret = qib_create_workqueues(dd);
	if (ret)
		goto bail;

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	ret = qib_enable_wc(dd);
	if (ret) {
		qib_dev_err(dd,
			"Write combining not enabled (err %d): performance may be poor\n",
			-ret);
		ret = 0;
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}

static void qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}

static void qib_shutdown_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);

	qib_shutdown_device(dd);
}

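/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must
 * be DMA'able (which means for some systems, it will go through an
 * IOMMU, or be forced into a low address range).
 */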
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);
		set_dev_node(&dd->pcidev->dev, old_node_id);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd,
				"attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
				amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			set_dev_node(&dd->pcidev->dev, rcd->node_id);
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			set_dev_node(&dd->pcidev->dev, old_node_id);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd,
		"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
		rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}

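/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls, otherwise we would need "hugepage" support.
 */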
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
				     GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc_array_node(chunk,
					   sizeof(rcd->rcvegrbuf_phys[0]),
					   GFP_KERNEL, rcd->node_id);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;

		old_node_id = dev_to_node(&dd->pcidev->dev);
		set_dev_node(&dd->pcidev->dev, rcd->node_id);
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		set_dev_node(&dd->pcidev->dev, old_node_id);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear buffer for security, sanity, and debugging */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
					  (u64 __iomem *)
					  ((char __iomem *)
					   dd->kregbase +
					   dd->rcvegrbase),
					  RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}

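/*
 * Map the chip's kernel registers uncached and its PIO buffers
 * write-combining, using the PAT-aware ioremap variants; the user
 * register region is mapped separately when it lies beyond the
 * kernel register space.
 */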
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;

	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap(qib_physaddr + dd->uregbase,
				       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}