#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_mad.h"
#ifdef CONFIG_DEBUG_FS
#include "qib_debugfs.h"
#include "qib_verbs.h"
#endif

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)

/*
 * Number of contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");

unsigned qib_numa_aware;
module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
MODULE_PARM_DESC(numa_aware,
	"0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");

/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");

unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");

unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");

/*
 * qib_wc_pat parameter:
 *      0 is WC via MTRR
 *      1 is WC via PAT
 *      If PAT initialization fails, code reverts back to MTRR
 */
unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");

static void verify_interrupt(struct timer_list *);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;

/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}

/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int local_node_id = pcibus_to_node(dd->pcidev->bus);

	if (local_node_id < 0)
		local_node_id = numa_node_id();
	dd->assigned_node_id = local_node_id;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
	if (!dd->rcd)
		return -ENOMEM;

	/* create (one or more) kernel contexts */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);

		rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
		if (!rcd) {
			qib_dev_err(dd,
				"Unable to allocate ctxtdata for Kernel ctxt, failing\n");
			kfree(dd->rcd);
			dd->rcd = NULL;
			return -ENOMEM;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	return 0;
}

/*
 * Common code for user and kernel context creation.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
	int node_id)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->node_id = node_id;
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;
#ifdef CONFIG_DEBUG_FS
		if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
			rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
				GFP_KERNEL, node_id);
			if (!rcd->opstats) {
				kfree(rcd);
				qib_dev_err(dd,
					"Unable to allocate per ctxt stats buffer\n");
				return NULL;
			}
		}
#endif
		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, allocate the eager
		 * receive buffers in physically contiguous 32KB chunks and
		 * carve them up; larger contiguous allocations tend to fail
		 * under memory pressure.  Derive how many buffers fit in a
		 * chunk and how many chunks this context needs.
		 */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}

/*
 * Common code for initializing the physical port structure.
 */
int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	int size;

	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	spin_lock_init(&ppd->cc_shadow_lock);
	init_waitqueue_head(&ppd->state_wait);

	timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);

	ppd->qib_wq = NULL;
	ppd->ibport_data.pmastats =
		alloc_percpu(struct qib_pma_counters);
	if (!ppd->ibport_data.pmastats)
		return -ENOMEM;
	ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
	ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
	if (!(ppd->ibport_data.rvp.rc_acks) ||
	    !(ppd->ibport_data.rvp.rc_qacks) ||
	    !(ppd->ibport_data.rvp.rc_delayed_comp))
		return -ENOMEM;
259
260 if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
261 goto bail;
262
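	/*
	 * Clamp the congestion control table size to the supported range
	 * and derive how many CCT blocks (of IB_CCT_ENTRIES each) it uses.
	 */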
263 ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
264 IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);
265
266 ppd->cc_max_table_entries =
267 ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
268
269 size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
270 * IB_CCT_ENTRIES;
271 ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
272 if (!ppd->ccti_entries)
273 goto bail;
274
275 size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
276 ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
277 if (!ppd->congestion_entries)
278 goto bail_1;
279
280 size = sizeof(struct cc_table_shadow);
281 ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
282 if (!ppd->ccti_entries_shadow)
283 goto bail_2;
284
285 size = sizeof(struct ib_cc_congestion_setting_attr);
286 ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
287 if (!ppd->congestion_entries_shadow)
288 goto bail_3;
289
290 return 0;
291
292bail_3:
293 kfree(ppd->ccti_entries_shadow);
294 ppd->ccti_entries_shadow = NULL;
295bail_2:
296 kfree(ppd->congestion_entries);
297 ppd->congestion_entries = NULL;
298bail_1:
299 kfree(ppd->ccti_entries);
300 ppd->ccti_entries = NULL;
301bail:
302
303 if (!qib_cc_table_size)
304 return 0;
305
306 if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
307 qib_cc_table_size = 0;
308 qib_dev_err(dd,
309 "Congestion Control table size %d less than minimum %d for port %d\n",
310 qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
311 }
312
313 qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
314 port);
315 return 0;
316}

static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd,
			"failed to allocate PIOavail reg area in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * The device-status word and per-port status words live in the
	 * same DMA page as the pioavail registers; place them just past
	 * the registers, cache-line aligned.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Set up the buffer that holds freeze and other messages, readable
	 * by userspace; it follows the status words and uses the rest of
	 * the page.  This is per unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	*dd->freezemsg = 0;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;

done:
	return ret;
}

/*
 * init_shadow_tids - allocate the shadow TID arrays
 * @dd: the qlogic_ib device
 *
 * Allocate the shadow TID page and DMA-address arrays, so that expected
 * TID entries can be unmapped and their pages released later.  Failure
 * here is not fatal; the driver simply runs without the shadow arrays,
 * so there is no return value.
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(array_size(sizeof(struct page *),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!pages)
		goto bail;

	addrs = vzalloc(array_size(sizeof(dma_addr_t),
				   dd->cfgctxts * dd->rcvtidcnt));
	if (!addrs)
		goto bail_free;

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}

/*
 * Perform per-device initialization that is only needed on first detect,
 * not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
411 qib_dev_err(dd,
412 "Driver only handles version %d, chip swversion is %d (%llx), failing\n",
413 QIB_CHIP_SWVERSION,
414 (int)(dd->revision >>
415 QLOGIC_IB_R_SOFTWARE_SHIFT) &
416 QLOGIC_IB_R_SOFTWARE_MASK,
417 (unsigned long long) dd->revision);
418 ret = -ENOSYS;
419 goto done;
420 }
421
422 if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
423 qib_devinfo(dd->pcidev, "%s", dd->boardversion);
424
425 spin_lock_init(&dd->pioavail_lock);
426 spin_lock_init(&dd->sendctrl_lock);
427 spin_lock_init(&dd->uctxt_lock);
428 spin_lock_init(&dd->qib_diag_trans_lock);
429 spin_lock_init(&dd->eep_st_lock);
430 mutex_init(&dd->eep_lock);
431
432 if (qib_mini_init)
433 goto done;
434
435 ret = init_pioavailregs(dd);
436 init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
	return ret;
}

/*
 * init_after_reset - re-initialize after a chip reset
 *
 * Make sure the chip can do no sends or receives and generate no
 * interrupts until the driver re-enables things, since interrupt and
 * buffer state are not preserved across a reset.
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure the chip does no sends or receives, no tail updates and
	 * no pioavail updates, while we re-initialize.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * A context value of -1 means "all contexts"; this is only
		 * really safe for disabling things, as done here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_TAILUPD_DIS, -1);

		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}
478
static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/* Enable PIO send, and update of PIOavail regs to memory. */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			QIB_SENDCTRL_AVAIL_ENB);

	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}
504
static void verify_interrupt(struct timer_list *t)
{
	struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
	u64 int_counter;

	if (!dd)
		return; /* being torn down */

	/*
	 * If no interrupts have arrived since the baseline snapshot, try
	 * the chip's interrupt fallback mechanism; if there is none, the
	 * device is unusable, otherwise re-check shortly.
	 */
	int_counter = qib_int_counter(dd) - dd->z_int_counter;
	if (int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev,
				"No interrupts detected, not usable.\n");
		else
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}
526
527static void init_piobuf_state(struct qib_devdata *dd)
528{
529 int i, pidx;
530 u32 uctxts;
531
532
533
534
535
536
537
538
539
540 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
541 for (pidx = 0; pidx < dd->num_pports; ++pidx)
542 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
543
544
545
546
547
548
549
550 uctxts = dd->cfgctxts - dd->first_user_ctxt;
551 dd->ctxts_extrabuf = dd->pbufsctxt ?
552 dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
553
554
555
556
557
558
559
560
561
562
563 for (i = 0; i < dd->pioavregs; i++) {
564 __le64 tmp;
565
566 tmp = dd->pioavailregs_dma[i];
567
568
569
570
571
572 dd->pioavailshadow[i] = le64_to_cpu(tmp);
573 }
574 while (i < ARRAY_SIZE(dd->pioavailshadow))
575 dd->pioavailshadow[i++] = 0;
576
577
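	/* Start with all send buffers marked for kernel (driver) use. */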
578 qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
579 TXCHK_CHG_TYPE_KERN, NULL);
580 dd->f_initvl15_bufs(dd);
581}

/*
 * Create the per-port workqueues.
 */
static int qib_create_workqueues(struct qib_devdata *dd)
{
	int pidx;
	struct qib_pportdata *ppd;
591
592 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
593 ppd = dd->pport + pidx;
594 if (!ppd->qib_wq) {
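			/*
			 * One ordered workqueue per port; WQ_MEM_RECLAIM so
			 * queued work can still run under memory pressure.
			 */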
595 char wq_name[8];
596
597 snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
598 dd->unit, pidx);
599 ppd->qib_wq = alloc_ordered_workqueue(wq_name,
600 WQ_MEM_RECLAIM);
601 if (!ppd->qib_wq)
602 goto wq_error;
603 }
604 }
605 return 0;
606wq_error:
607 pr_err("create_singlethread_workqueue failed for port %d\n",
608 pidx + 1);
609 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
610 ppd = dd->pport + pidx;
611 if (ppd->qib_wq) {
612 destroy_workqueue(ppd->qib_wq);
613 ppd->qib_wq = NULL;
614 }
615 }
616 return -ENOMEM;
617}
618
619static void qib_free_pportdata(struct qib_pportdata *ppd)
620{
621 free_percpu(ppd->ibport_data.pmastats);
622 free_percpu(ppd->ibport_data.rvp.rc_acks);
623 free_percpu(ppd->ibport_data.rvp.rc_qacks);
624 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
625 ppd->ibport_data.pmastats = NULL;
626}

/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because
 * changes affect many things.
 *
 * Again, reset is done with both interrupts and the chip disabled.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;
651
652
653 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
654 ppd = dd->pport + pidx;
655 spin_lock_irqsave(&ppd->lflags_lock, flags);
656 ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
657 QIBL_LINKDOWN | QIBL_LINKINIT |
658 QIBL_LINKV);
659 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
660 }
661
662 if (reinit)
663 ret = init_after_reset(dd);
664 else
665 ret = loadtime_init(dd);
666 if (ret)
667 goto done;
668
669
670 if (qib_mini_init)
671 return 0;
672
673 ret = dd->f_late_initreg(dd);
674 if (ret)
675 goto done;
676
677
678 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
679
680
681
682
683
684
685 rcd = dd->rcd[i];
686 if (!rcd)
687 continue;
688
689 lastfail = qib_create_rcvhdrq(dd, rcd);
690 if (!lastfail)
691 lastfail = qib_setup_eagerbufs(rcd);
692 if (lastfail) {
693 qib_dev_err(dd,
694 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
695 continue;
696 }
697 }
698
699 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
700 int mtu;
701
702 if (lastfail)
703 ret = lastfail;
704 ppd = dd->pport + pidx;
705 mtu = ib_mtu_enum_to_int(qib_ibmtu);
706 if (mtu == -1) {
707 mtu = QIB_DEFAULT_MTU;
708 qib_ibmtu = 0;
709 }
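		/*
		 * The max IB packet length is the smaller of what a send
		 * buffer can hold and what the receive side can take
		 * (eager buffer plus receive header).
		 */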
710
711 ppd->init_ibmaxlen = min(mtu > 2048 ?
712 dd->piosize4k : dd->piosize2k,
713 dd->rcvegrbufsize +
714 (dd->rcvhdrentsize << 2));
715
716
717
718
719 ppd->ibmaxlen = ppd->init_ibmaxlen;
720 qib_set_mtu(ppd, mtu);
721
722 spin_lock_irqsave(&ppd->lflags_lock, flags);
723 ppd->lflags |= QIBL_IB_LINK_DISABLED;
724 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
725
726 lastfail = dd->f_bringup_serdes(ppd);
727 if (lastfail) {
728 qib_devinfo(dd->pcidev,
729 "Failed to bringup IB port %u\n", ppd->port);
730 lastfail = -ENETDOWN;
731 continue;
732 }
733
734 portok++;
735 }
736
737 if (!portok) {
738
739 if (!ret && lastfail)
740 ret = lastfail;
741 else if (!ret)
742 ret = -ENETDOWN;
743
744 }
745
746 enable_chip(dd);
747
748 init_piobuf_state(dd);
749
750done:
751 if (!ret) {
752
753 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
754 ppd = dd->pport + pidx;
755
756
757
758
759 *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
760 QIB_STATUS_INITTED;
761 if (!ppd->link_speed_enabled)
762 continue;
763 if (dd->flags & QIB_HAS_SEND_DMA)
764 ret = qib_setup_sdma(ppd);
765 timer_setup(&ppd->hol_timer, qib_hol_event, 0);
766 ppd->hol_state = QIB_HOL_UP;
767 }
768
769
770 dd->f_set_intr_state(dd, 1);
771
772
773
774
775
776 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
777
778 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
779 }
780
781
782 return ret;
783}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */
int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}
795
796void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
797{
798}
799
800static inline struct qib_devdata *__qib_lookup(int unit)
801{
802 return idr_find(&qib_unit_table, unit);
803}
804
805struct qib_devdata *qib_lookup(int unit)
806{
807 struct qib_devdata *dd;
808 unsigned long flags;
809
810 spin_lock_irqsave(&qib_devs_lock, flags);
811 dd = __qib_lookup(unit);
812 spin_unlock_irqrestore(&qib_devs_lock, flags);
813
814 return dd;
815}

/*
 * Stop the timers during cleanup, so we don't try to do, e.g.,
 * symbol error cleanup, while we are shutting things down.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;
825
826 if (dd->stats_timer.function)
827 del_timer_sync(&dd->stats_timer);
828 if (dd->intrchk_timer.function)
829 del_timer_sync(&dd->intrchk_timer);
830 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
831 ppd = dd->pport + pidx;
832 if (ppd->hol_timer.function)
833 del_timer_sync(&ppd->hol_timer);
834 if (ppd->led_override_timer.function) {
835 del_timer_sync(&ppd->led_override_timer);
836 atomic_set(&ppd->led_override_timer_active, 0);
837 }
838 if (ppd->symerr_clear_timer.function)
839 del_timer_sync(&ppd->symerr_clear_timer);
840 }
841}

/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * Called to make the device quiet when we are about to unload the
 * driver, and also when the device is administratively disabled.  It
 * does not free any data structures; everything it does has to be set
 * up again by qib_init(dd, 1).
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;
856
857 if (dd->flags & QIB_SHUTDOWN)
858 return;
859 dd->flags |= QIB_SHUTDOWN;
860
861 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
862 ppd = dd->pport + pidx;
863
864 spin_lock_irq(&ppd->lflags_lock);
865 ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
866 QIBL_LINKARMED | QIBL_LINKACTIVE |
867 QIBL_LINKV);
868 spin_unlock_irq(&ppd->lflags_lock);
869 *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
870 }
871 dd->flags &= ~QIB_INITTED;
872
873
874 dd->f_set_intr_state(dd, 0);
875
876 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
877 ppd = dd->pport + pidx;
878 dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
879 QIB_RCVCTRL_CTXT_DIS |
880 QIB_RCVCTRL_INTRAVAIL_DIS |
881 QIB_RCVCTRL_PKEY_ENB, -1);
882
883
884
885
886 dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
887 }
888
889
890
891
892
893 udelay(20);
894
895 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
896 ppd = dd->pport + pidx;
897 dd->f_setextled(ppd, 0);
898
899 if (dd->flags & QIB_HAS_SEND_DMA)
900 qib_teardown_sdma(ppd);
901
902 dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
903 QIB_SENDCTRL_SEND_DIS);
904
905
906
907
908 dd->f_quiet_serdes(ppd);
909
910 if (ppd->qib_wq) {
911 destroy_workqueue(ppd->qib_wq);
912 ppd->qib_wq = NULL;
913 }
914 qib_free_pportdata(ppd);
915 }
916
917}

/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * Free any data allocated for a context.  This must not touch anything
 * that would affect a simultaneous re-allocation of context data,
 * because it is called after qib_mutex is released (and can be called
 * from reinit as well).  It must never change any chip state or global
 * driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

935 if (rcd->rcvhdrq) {
936 dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
937 rcd->rcvhdrq, rcd->rcvhdrq_phys);
938 rcd->rcvhdrq = NULL;
939 if (rcd->rcvhdrtail_kvaddr) {
940 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
941 rcd->rcvhdrtail_kvaddr,
942 rcd->rcvhdrqtailaddr_phys);
943 rcd->rcvhdrtail_kvaddr = NULL;
944 }
945 }
946 if (rcd->rcvegrbuf) {
947 unsigned e;
948
949 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
950 void *base = rcd->rcvegrbuf[e];
951 size_t size = rcd->rcvegrbuf_size;
952
953 dma_free_coherent(&dd->pcidev->dev, size,
954 base, rcd->rcvegrbuf_phys[e]);
955 }
956 kfree(rcd->rcvegrbuf);
957 rcd->rcvegrbuf = NULL;
958 kfree(rcd->rcvegrbuf_phys);
959 rcd->rcvegrbuf_phys = NULL;
960 rcd->rcvegrbuf_chunks = 0;
961 }
962
963 kfree(rcd->tid_pg_list);
964 vfree(rcd->user_event_mask);
965 vfree(rcd->subctxt_uregbase);
966 vfree(rcd->subctxt_rcvegrbuf);
967 vfree(rcd->subctxt_rcvhdr_base);
968#ifdef CONFIG_DEBUG_FS
969 kfree(rcd->opstats);
970 rcd->opstats = NULL;
971#endif
972 kfree(rcd);
973}

/*
 * Perform a PIO buffer bandwidth write test, to verify that the system
 * is set up for write-combining to the PIO buffers; if the measured
 * bandwidth is poor, warn, since performance will suffer badly.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;
994
995 piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
996 if (!piobuf) {
997 qib_devinfo(dd->pcidev,
998 "No PIObufs for checking perf, skipping\n");
999 return;
1000 }
1001
1002
1003
1004
1005
1006 cnt = 1024;
1007
1008 addr = vmalloc(cnt);
1009 if (!addr)
1010 goto done;
1011
1012 preempt_disable();
1013 msecs = 1 + jiffies_to_msecs(jiffies);
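	/* Wait for the msec counter to tick over, so timing starts on a fresh tick. */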
1014 for (lcnt = 0; lcnt < 10000U; lcnt++) {
1015
1016 if (jiffies_to_msecs(jiffies) >= msecs)
1017 break;
1018 udelay(1);
1019 }
1020
1021 dd->f_set_armlaunch(dd, 0);
1022
1023
1024
1025
1026 writeq(0, piobuf);
1027 qib_flush_wc();
1028
1029
1030
1031
1032
1033
1034 msecs = jiffies_to_msecs(jiffies);
1035 for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
1036 qib_pio_copy(piobuf + 64, addr, cnt >> 2);
1037 emsecs = jiffies_to_msecs(jiffies) - msecs;
1038 }
1039
1040
1041 if (lcnt < (emsecs * 1024U))
1042 qib_dev_err(dd,
1043 "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
1044 lcnt / (u32) emsecs);
1045
1046 preempt_enable();
1047
1048 vfree(addr);
1049
1050done:
1051
1052 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
1053 qib_sendbuf_done(dd, pbnum);
1054 dd->f_set_armlaunch(dd, 1);
1055}
1056
1057void qib_free_devdata(struct qib_devdata *dd)
1058{
1059 unsigned long flags;
1060
1061 spin_lock_irqsave(&qib_devs_lock, flags);
1062 idr_remove(&qib_unit_table, dd->unit);
1063 list_del(&dd->list);
1064 spin_unlock_irqrestore(&qib_devs_lock, flags);
1065
1066#ifdef CONFIG_DEBUG_FS
1067 qib_dbg_ibdev_exit(&dd->verbs_dev);
1068#endif
1069 free_percpu(dd->int_counter);
1070 rvt_dealloc_device(&dd->verbs_dev.rdi);
1071}
1072
1073u64 qib_int_counter(struct qib_devdata *dd)
1074{
1075 int cpu;
1076 u64 int_counter = 0;
1077
1078 for_each_possible_cpu(cpu)
1079 int_counter += *per_cpu_ptr(dd->int_counter, cpu);
1080 return int_counter;
1081}
1082
1083u64 qib_sps_ints(void)
1084{
1085 unsigned long flags;
1086 struct qib_devdata *dd;
1087 u64 sps_ints = 0;
1088
1089 spin_lock_irqsave(&qib_devs_lock, flags);
1090 list_for_each_entry(dd, &qib_dev_list, list) {
1091 sps_ints += qib_int_counter(dd);
1092 }
1093 spin_unlock_irqrestore(&qib_devs_lock, flags);
1094 return sps_ints;
1095}

/*
 * Allocate our primary per-unit data structure.  Must be done via the
 * verbs allocator, because the verbs cleanup process both does the
 * cleanup and the free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
1107 unsigned long flags;
1108 struct qib_devdata *dd;
1109 int ret, nports;
1110
1111
1112 nports = extra / sizeof(struct qib_pportdata);
1113 dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
1114 nports);
1115 if (!dd)
1116 return ERR_PTR(-ENOMEM);
1117
1118 INIT_LIST_HEAD(&dd->list);
1119
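	/* Allocate a unit number for this device, under the devs lock. */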
1120 idr_preload(GFP_KERNEL);
1121 spin_lock_irqsave(&qib_devs_lock, flags);
1122
1123 ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
1124 if (ret >= 0) {
1125 dd->unit = ret;
1126 list_add(&dd->list, &qib_dev_list);
1127 }
1128
1129 spin_unlock_irqrestore(&qib_devs_lock, flags);
1130 idr_preload_end();
1131
1132 if (ret < 0) {
1133 qib_early_err(&pdev->dev,
1134 "Could not allocate unit ID: error %d\n", -ret);
1135 goto bail;
1136 }
1137 rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
1138
1139 dd->int_counter = alloc_percpu(u64);
1140 if (!dd->int_counter) {
1141 ret = -ENOMEM;
1142 qib_early_err(&pdev->dev,
1143 "Could not allocate per-cpu int_counter\n");
1144 goto bail;
1145 }
1146
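	/*
	 * On first use, allocate the shared bitmap that tracks which CPUs
	 * have already been assigned to receive contexts.
	 */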
1147 if (!qib_cpulist_count) {
1148 u32 count = num_online_cpus();
1149
1150 qib_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
1151 GFP_KERNEL);
1152 if (qib_cpulist)
1153 qib_cpulist_count = count;
1154 }
1155#ifdef CONFIG_DEBUG_FS
1156 qib_dbg_ibdev_init(&dd->verbs_dev);
1157#endif
1158 return dd;
1159bail:
1160 if (!list_empty(&dd->list))
1161 list_del_init(&dd->list);
1162 rvt_dealloc_device(&dd->verbs_dev.rdi);
1163 return ERR_PTR(ret);
1164}

/*
 * Called from freeze-mode handlers, and from the PCI error reporting
 * code.  Should be paranoid about the state of the system and data
 * structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
1174 u32 pidx;
1175
1176 dd->flags &= ~QIB_INITTED;
1177 if (dd->pport)
1178 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1179 struct qib_pportdata *ppd;
1180
1181 ppd = dd->pport + pidx;
1182 if (dd->flags & QIB_PRESENT) {
1183 qib_set_linkstate(ppd,
1184 QIB_IB_LINKDOWN_DISABLE);
1185 dd->f_setextled(ppd, 0);
1186 }
1187 *ppd->statusp &= ~QIB_STATUS_IB_READY;
1188 }
1189 }
1190
1191
1192
1193
1194
1195
1196 if (dd->devstatusp)
1197 *dd->devstatusp |= QIB_STATUS_HWERROR;
1198}
1199
1200static void qib_remove_one(struct pci_dev *);
1201static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
1202static void qib_shutdown_one(struct pci_dev *);
1203
1204#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
1205#define PFX QIB_DRV_NAME ": "
1206
1207static const struct pci_device_id qib_pci_tbl[] = {
1208 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
1209 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
1210 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
1211 { 0, }
1212};
1213
1214MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
1215
1216static struct pci_driver qib_driver = {
1217 .name = QIB_DRV_NAME,
1218 .probe = qib_init_one,
1219 .remove = qib_remove_one,
1220 .shutdown = qib_shutdown_one,
1221 .id_table = qib_pci_tbl,
1222 .err_handler = &qib_pci_err_handler,
1223};
1224
1225#ifdef CONFIG_INFINIBAND_QIB_DCA
1226
1227static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
1228static struct notifier_block dca_notifier = {
1229 .notifier_call = qib_notify_dca,
1230 .next = NULL,
1231 .priority = 0
1232};
1233
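/* Forward a DCA core event to the chip-specific handler for one device. */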
1234static int qib_notify_dca_device(struct device *device, void *data)
1235{
1236 struct qib_devdata *dd = dev_get_drvdata(device);
1237 unsigned long event = *(unsigned long *)data;
1238
1239 return dd->f_notify_dca(dd, event);
1240}
1241
1242static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
1243 void *p)
1244{
1245 int rval;
1246
1247 rval = driver_for_each_device(&qib_driver.driver, NULL,
1248 &event, qib_notify_dca_device);
1249 return rval ? NOTIFY_BAD : NOTIFY_DONE;
1250}
1251
1252#endif

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qib_ib_init(void)
{
	int ret;
1261
1262 ret = qib_dev_init();
1263 if (ret)
1264 goto bail;
1265
1266
1267
1268
1269
1270 idr_init(&qib_unit_table);
1271
1272#ifdef CONFIG_INFINIBAND_QIB_DCA
1273 dca_register_notify(&dca_notifier);
1274#endif
1275#ifdef CONFIG_DEBUG_FS
1276 qib_dbg_init();
1277#endif
1278 ret = pci_register_driver(&qib_driver);
1279 if (ret < 0) {
1280 pr_err("Unable to register driver: error %d\n", -ret);
1281 goto bail_dev;
1282 }
1283
1284
1285 if (qib_init_qibfs())
1286 pr_err("Unable to register ipathfs\n");
1287 goto bail;
1288
1289bail_dev:
1290#ifdef CONFIG_INFINIBAND_QIB_DCA
1291 dca_unregister_notify(&dca_notifier);
1292#endif
1293#ifdef CONFIG_DEBUG_FS
1294 qib_dbg_exit();
1295#endif
1296 idr_destroy(&qib_unit_table);
1297 qib_dev_cleanup();
1298bail:
1299 return ret;
1300}
1301
1302module_init(qib_ib_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qib_ib_cleanup(void)
{
	int ret;
1310
1311 ret = qib_exit_qibfs();
1312 if (ret)
1313 pr_err(
1314 "Unable to cleanup counter filesystem: error %d\n",
1315 -ret);
1316
1317#ifdef CONFIG_INFINIBAND_QIB_DCA
1318 dca_unregister_notify(&dca_notifier);
1319#endif
1320 pci_unregister_driver(&qib_driver);
1321#ifdef CONFIG_DEBUG_FS
1322 qib_dbg_exit();
1323#endif
1324
1325 qib_cpulist_count = 0;
1326 kfree(qib_cpulist);
1327
1328 idr_destroy(&qib_unit_table);
1329 qib_dev_cleanup();
1330}
1331
1332module_exit(qib_ib_cleanup);
1333
1334
1335static void cleanup_device_data(struct qib_devdata *dd)
1336{
1337 int ctxt;
1338 int pidx;
1339 struct qib_ctxtdata **tmp;
1340 unsigned long flags;
1341
1342
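	/*
	 * Tell userspace the chip is no longer present, and free the
	 * per-port congestion control state under its shadow lock.
	 */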
1343 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1344 if (dd->pport[pidx].statusp)
1345 *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
1346
1347 spin_lock(&dd->pport[pidx].cc_shadow_lock);
1348
1349 kfree(dd->pport[pidx].congestion_entries);
1350 dd->pport[pidx].congestion_entries = NULL;
1351 kfree(dd->pport[pidx].ccti_entries);
1352 dd->pport[pidx].ccti_entries = NULL;
1353 kfree(dd->pport[pidx].ccti_entries_shadow);
1354 dd->pport[pidx].ccti_entries_shadow = NULL;
1355 kfree(dd->pport[pidx].congestion_entries_shadow);
1356 dd->pport[pidx].congestion_entries_shadow = NULL;
1357
1358 spin_unlock(&dd->pport[pidx].cc_shadow_lock);
1359 }
1360
1361 if (!qib_wc_pat)
1362 qib_disable_wc(dd);
1363
1364 if (dd->pioavailregs_dma) {
1365 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1366 (void *) dd->pioavailregs_dma,
1367 dd->pioavailregs_phys);
1368 dd->pioavailregs_dma = NULL;
1369 }
1370
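	/*
	 * Free the expected-TID shadow arrays, unmapping and releasing any
	 * user pages still referenced by the chip's TID table.
	 */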
1371 if (dd->pageshadow) {
1372 struct page **tmpp = dd->pageshadow;
1373 dma_addr_t *tmpd = dd->physshadow;
1374 int i;
1375
1376 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
1377 int ctxt_tidbase = ctxt * dd->rcvtidcnt;
1378 int maxtid = ctxt_tidbase + dd->rcvtidcnt;
1379
1380 for (i = ctxt_tidbase; i < maxtid; i++) {
1381 if (!tmpp[i])
1382 continue;
1383 pci_unmap_page(dd->pcidev, tmpd[i],
1384 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1385 qib_release_user_pages(&tmpp[i], 1);
1386 tmpp[i] = NULL;
1387 }
1388 }
1389
1390 dd->pageshadow = NULL;
1391 vfree(tmpp);
1392 dd->physshadow = NULL;
1393 vfree(tmpd);
1394 }
1395
1396
1397
1398
1399
1400
1401
1402
1403 spin_lock_irqsave(&dd->uctxt_lock, flags);
1404 tmp = dd->rcd;
1405 dd->rcd = NULL;
1406 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1407 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
1408 struct qib_ctxtdata *rcd = tmp[ctxt];
1409
1410 tmp[ctxt] = NULL;
1411 qib_free_ctxtdata(dd, rcd);
1412 }
1413 kfree(tmp);
1414}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
1422
1423
1424
1425
1426
1427
1428
1429 if (dd->f_cleanup)
1430 dd->f_cleanup(dd);
1431
1432 qib_pcie_ddcleanup(dd);
1433
1434 cleanup_device_data(dd);
1435
1436 qib_free_devdata(dd);
1437}
1438
1439static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1440{
1441 int ret, j, pidx, initfail;
1442 struct qib_devdata *dd = NULL;
1443
1444 ret = qib_pcie_init(pdev, ent);
1445 if (ret)
1446 goto bail;
1447
1448
1449
1450
1451
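	/* Dispatch to the chip-specific setup code for this device ID. */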
1452 switch (ent->device) {
1453 case PCI_DEVICE_ID_QLOGIC_IB_6120:
1454#ifdef CONFIG_PCI_MSI
1455 dd = qib_init_iba6120_funcs(pdev, ent);
1456#else
1457 qib_early_err(&pdev->dev,
1458 "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
1459 ent->device);
1460 dd = ERR_PTR(-ENODEV);
1461#endif
1462 break;
1463
1464 case PCI_DEVICE_ID_QLOGIC_IB_7220:
1465 dd = qib_init_iba7220_funcs(pdev, ent);
1466 break;
1467
1468 case PCI_DEVICE_ID_QLOGIC_IB_7322:
1469 dd = qib_init_iba7322_funcs(pdev, ent);
1470 break;
1471
1472 default:
1473 qib_early_err(&pdev->dev,
1474 "Failing on unknown Intel deviceid 0x%x\n",
1475 ent->device);
1476 ret = -ENODEV;
1477 }
1478
1479 if (IS_ERR(dd))
1480 ret = PTR_ERR(dd);
1481 if (ret)
1482 goto bail;
1483
1484 ret = qib_create_workqueues(dd);
1485 if (ret)
1486 goto bail;
1487
1488
1489 initfail = qib_init(dd, 0);
1490
1491 ret = qib_register_ib_device(dd);
1492
1493
1494
1495
1496
1497
1498
1499 if (!qib_mini_init && !initfail && !ret)
1500 dd->flags |= QIB_INITTED;
1501
1502 j = qib_device_create(dd);
1503 if (j)
1504 qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1505 j = qibfs_add(dd);
1506 if (j)
1507 qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
1508 -j);
1509
1510 if (qib_mini_init || initfail || ret) {
1511 qib_stop_timers(dd);
1512 flush_workqueue(ib_wq);
1513 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1514 dd->f_quiet_serdes(dd->pport + pidx);
1515 if (qib_mini_init)
1516 goto bail;
1517 if (!j) {
1518 (void) qibfs_remove(dd);
1519 qib_device_remove(dd);
1520 }
1521 if (!ret)
1522 qib_unregister_ib_device(dd);
1523 qib_postinit_cleanup(dd);
1524 if (initfail)
1525 ret = initfail;
1526 goto bail;
1527 }
1528
1529 if (!qib_wc_pat) {
1530 ret = qib_enable_wc(dd);
1531 if (ret) {
1532 qib_dev_err(dd,
1533 "Write combining not enabled (err %d): performance may be poor\n",
1534 -ret);
1535 ret = 0;
1536 }
1537 }
1538
1539 qib_verify_pioperf(dd);
1540bail:
1541 return ret;
1542}
1543
1544static void qib_remove_one(struct pci_dev *pdev)
1545{
1546 struct qib_devdata *dd = pci_get_drvdata(pdev);
1547 int ret;
1548
1549
1550 qib_unregister_ib_device(dd);
1551
1552
1553
1554
1555
1556 if (!qib_mini_init)
1557 qib_shutdown_device(dd);
1558
1559 qib_stop_timers(dd);
1560
1561
1562 flush_workqueue(ib_wq);
1563
1564 ret = qibfs_remove(dd);
1565 if (ret)
1566 qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
1567 -ret);
1568
1569 qib_device_remove(dd);
1570
1571 qib_postinit_cleanup(dd);
1572}
1573
1574static void qib_shutdown_one(struct pci_dev *pdev)
1575{
1576 struct qib_devdata *dd = pci_get_drvdata(pdev);
1577
1578 qib_shutdown_device(dd);
1579}

/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;
	int old_node_id;
1594
1595 if (!rcd->rcvhdrq) {
1596 dma_addr_t phys_hdrqtail;
1597 gfp_t gfp_flags;
1598
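		/*
		 * Size the queue as rcvhdrcnt entries of rcvhdrentsize
		 * 32-bit words each, rounded up to a whole page.
		 */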
1599 amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1600 sizeof(u32), PAGE_SIZE);
1601 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
1602 GFP_USER : GFP_KERNEL;
1603
1604 old_node_id = dev_to_node(&dd->pcidev->dev);
1605 set_dev_node(&dd->pcidev->dev, rcd->node_id);
1606 rcd->rcvhdrq = dma_alloc_coherent(
1607 &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
1608 gfp_flags | __GFP_COMP);
1609 set_dev_node(&dd->pcidev->dev, old_node_id);
1610
1611 if (!rcd->rcvhdrq) {
1612 qib_dev_err(dd,
1613 "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
1614 amt, rcd->ctxt);
1615 goto bail;
1616 }
1617
1618 if (rcd->ctxt >= dd->first_user_ctxt) {
1619 rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
1620 if (!rcd->user_event_mask)
1621 goto bail_free_hdrq;
1622 }
1623
1624 if (!(dd->flags & QIB_NODMA_RTAIL)) {
1625 set_dev_node(&dd->pcidev->dev, rcd->node_id);
1626 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
1627 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1628 gfp_flags);
1629 set_dev_node(&dd->pcidev->dev, old_node_id);
1630 if (!rcd->rcvhdrtail_kvaddr)
1631 goto bail_free;
1632 rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
1633 }
1634
1635 rcd->rcvhdrq_size = amt;
1636 }
1637
1638
1639 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
1640 if (rcd->rcvhdrtail_kvaddr)
1641 memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
1642 return 0;
1643
1644bail_free:
1645 qib_dev_err(dd,
1646 "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
1647 rcd->ctxt);
1648 vfree(rcd->user_event_mask);
1649 rcd->user_event_mask = NULL;
1650bail_free_hdrq:
1651 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1652 rcd->rcvhdrq_phys);
1653 rcd->rcvhdrq = NULL;
1654bail:
1655 return -ENOMEM;
1656}

/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into the chip.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;
	int old_node_id;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;

1683 egrcnt = rcd->rcvegrcnt;
1684 egroff = rcd->rcvegr_tid_base;
1685 egrsize = dd->rcvegrbufsize;
1686
1687 chunk = rcd->rcvegrbuf_chunks;
1688 egrperchunk = rcd->rcvegrbufs_perchunk;
1689 size = rcd->rcvegrbuf_size;
1690 if (!rcd->rcvegrbuf) {
1691 rcd->rcvegrbuf =
1692 kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
1693 GFP_KERNEL, rcd->node_id);
1694 if (!rcd->rcvegrbuf)
1695 goto bail;
1696 }
1697 if (!rcd->rcvegrbuf_phys) {
1698 rcd->rcvegrbuf_phys =
1699 kmalloc_array_node(chunk,
1700 sizeof(rcd->rcvegrbuf_phys[0]),
1701 GFP_KERNEL, rcd->node_id);
1702 if (!rcd->rcvegrbuf_phys)
1703 goto bail_rcvegrbuf;
1704 }
1705 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
1706 if (rcd->rcvegrbuf[e])
1707 continue;
1708
1709 old_node_id = dev_to_node(&dd->pcidev->dev);
1710 set_dev_node(&dd->pcidev->dev, rcd->node_id);
1711 rcd->rcvegrbuf[e] =
1712 dma_alloc_coherent(&dd->pcidev->dev, size,
1713 &rcd->rcvegrbuf_phys[e],
1714 gfp_flags);
1715 set_dev_node(&dd->pcidev->dev, old_node_id);
1716 if (!rcd->rcvegrbuf[e])
1717 goto bail_rcvegrbuf_phys;
1718 }
1719
1720 rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
1721
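	/*
	 * Program the DMA address of each eager buffer into the chip's
	 * eager TID table, one chunk at a time.
	 */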
1722 for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
1723 dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
1724 unsigned i;
1725
1726
1727 memset(rcd->rcvegrbuf[chunk], 0, size);
1728
1729 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
1730 dd->f_put_tid(dd, e + egroff +
1731 (u64 __iomem *)
1732 ((char __iomem *)
1733 dd->kregbase +
1734 dd->rcvegrbase),
1735 RCVHQ_RCV_TYPE_EAGER, pa);
1736 pa += egrsize;
1737 }
1738 cond_resched();
1739 }
1740
1741 return 0;
1742
1743bail_rcvegrbuf_phys:
1744 for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
1745 dma_free_coherent(&dd->pcidev->dev, size,
1746 rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
1747 kfree(rcd->rcvegrbuf_phys);
1748 rcd->rcvegrbuf_phys = NULL;
1749bail_rcvegrbuf:
1750 kfree(rcd->rcvegrbuf);
1751 rcd->rcvegrbuf = NULL;
1752bail:
1753 return -ENOMEM;
1754}

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup().
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
1764 u64 __iomem *qib_kregbase = NULL;
1765 void __iomem *qib_piobase = NULL;
1766 u64 __iomem *qib_userbase = NULL;
1767 u64 qib_kreglen;
1768 u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
1769 u64 qib_pio4koffset = dd->piobufbase >> 32;
1770 u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
1771 u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
1772 u64 qib_physaddr = dd->physaddr;
1773 u64 qib_piolen;
1774 u64 qib_userlen = 0;
1775
1776
1777
1778
1779
1780
1781 iounmap(dd->kregbase);
1782 dd->kregbase = NULL;
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
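	/*
	 * Split the BAR into an uncached kernel-register mapping and a
	 * write-combining PIO-buffer mapping, based on where the 2K and 4K
	 * PIO buffers sit relative to the registers.
	 */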
1793 if (dd->piobcnt4k == 0) {
1794 qib_kreglen = qib_pio2koffset;
1795 qib_piolen = qib_pio2klen;
1796 } else if (qib_pio2koffset < qib_pio4koffset) {
1797 qib_kreglen = qib_pio2koffset;
1798 qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
1799 } else {
1800 qib_kreglen = qib_pio4koffset;
1801 qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
1802 }
1803 qib_piolen += vl15buflen;
1804
1805 if (dd->uregbase > qib_kreglen)
1806 qib_userlen = dd->ureg_align * dd->cfgctxts;
1807
1808
1809 qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
1810 if (!qib_kregbase)
1811 goto bail;
1812
1813 qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
1814 if (!qib_piobase)
1815 goto bail_kregbase;
1816
1817 if (qib_userlen) {
1818 qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
1819 qib_userlen);
1820 if (!qib_userbase)
1821 goto bail_piobase;
1822 }
1823
1824 dd->kregbase = qib_kregbase;
1825 dd->kregend = (u64 __iomem *)
1826 ((char __iomem *) qib_kregbase + qib_kreglen);
1827 dd->piobase = qib_piobase;
1828 dd->pio2kbase = (void __iomem *)
1829 (((char __iomem *) dd->piobase) +
1830 qib_pio2koffset - qib_kreglen);
1831 if (dd->piobcnt4k)
1832 dd->pio4kbase = (void __iomem *)
1833 (((char __iomem *) dd->piobase) +
1834 qib_pio4koffset - qib_kreglen);
1835 if (qib_userlen)
1836
1837 dd->userbase = qib_userbase;
1838 return 0;
1839
1840bail_piobase:
1841 iounmap(qib_piobase);
1842bail_kregbase:
1843 iounmap(qib_kregbase);
1844bail:
1845 return -ENOMEM;
1846}
1847