#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/slab.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

#define DRIVER_LOAD_MSG	"QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

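/*
 * The size has to be larger than this string, so we can append
 * board/chip information to it in the init code.
 */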
const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

unsigned ipath_mtu4096 = 1;
module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");

static unsigned ipath_hol_timeout_ms = 13000;
module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
	"duration of user app suspension after link failure");

unsigned ipath_linkrecovery = 1;
module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

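/*
 * Table to translate the LINKTRAININGSTATE portion of
 * IBCStatus to a human-readable form.
 */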
const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",		/* unused */
	"LState7",		/* unused */
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"CfgTxRevLane",		/* unused before IBA7220 */
	"RecovWaitRmt",
	"RecovIdle",
	/* link-training states below here are used only by the IBA7220 */
	"CfgEnhanced",
	"CfgTest",
	"CfgWaitRmtTest",
	"CfgWaitCfgEnhanced",
	"SendTS_T",
	"SendTstIdles",
	"RcvTS_T",
	"SendTst_TS1s",
	"LTState18", "LTState19", "LTState1A", "LTState1B",
	"LTState1C", "LTState1D", "LTState1E", "LTState1F"
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd

#define STATUS_TIMEOUT 60

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
	.driver = {
		.groups = ipath_driver_attr_groups,
	},
};
static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}

static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vzalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}

int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	int maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}

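/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */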
int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}

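/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when write combining is working correctly, at
 * least some bandwidth is required for acceptable performance.
 */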
static void ipath_verify_pioperf(struct ipath_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
	if (!piobuf) {
		dev_info(&dd->pcidev->dev,
			"No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely a multiple of the store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		dev_info(&dd->pcidev->dev,
			"Couldn't get memory for checking PIO perf,"
			" skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	ipath_disable_armlaunch(dd);

	/*
	 * length 0, no dwords actually sent, and mark as VL15
	 * on chips where that may matter (due to IB flowcontrol)
	 */
	if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
		writeq(1UL << 63, piobuf);
	else
		writeq(0, piobuf);
	ipath_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt off we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		__iowrite32_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		ipath_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is "
			"only %u MiB/sec\n",
			lcnt / (u32) emsecs);
	else
		ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	ipath_disarm_piobufs(dd, pbnum, 1);
	ipath_enable_armlaunch(dd);
}

static void cleanup_device(struct ipath_devdata *dd);

static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;
	u8 rev;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	ret = pci_enable_device(pdev);
	if (ret) {
		/*
		 * Typically seen when a chip reset left the BAR in its
		 * cleared post-reset state; nothing we can do about it
		 * here except report the failure.
		 */
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		/*
		 * If the 64 bit setup fails, try 32 bit.  Some systems
		 * do not set up 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			dev_info(&pdev->dev,
				"Unable to set DMA mask for unit %u: %d\n",
				dd->ipath_unit, ret);
			goto bail_regions;
		} else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (ret)
				dev_info(&pdev->dev,
					"Unable to set DMA consistent mask "
					"for unit %u: %d\n",
					dd->ipath_unit, ret);
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret)
			dev_info(&pdev->dev,
				"Unable to set DMA consistent mask "
				"for unit %u: %d\n",
				dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

	/*
	 * Save BARs to rewrite after a device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;
	dd->ipath_vendorid = ent->vendor;

	/* Set up the chip-specific functions, as early as possible. */
	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
		ipath_init_iba6110_funcs(dd);
		break;

	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		ret = -ENODEV;
		goto bail_regions;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
			   j, &pdev->resource[j],
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	if (ret) {
		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
			      "%u: err %d\n", dd->ipath_unit, -ret);
		goto bail_regions;
	}
	dd->ipath_pcirev = rev;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->ipath_kregbase = __ioremap(addr, len,
				       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;

	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * dd->ipath_irq was set up by the chip-specific bus routine
	 * above; register our handler on it.  The interrupt may be
	 * shared with other devices, hence IRQF_SHARED.
	 */
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);

	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_verify_pioperf(dd);

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	goto bail;

bail_irqsetup:
	cleanup_device(dd);

	if (dd->ipath_irq)
		dd->ipath_f_free_irq(dd);

	if (dd->ipath_f_cleanup)
		dd->ipath_f_cleanup(dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}

static void cleanup_device(struct ipath_devdata *dd)
{
	int port;
	struct ipath_portdata **tmp;
	unsigned long flags;

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * clear these so any register reads/writes "fail"
			 * until re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_spectriggerhit)
		dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
			 dd->ipath_spectriggerhit);

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
			dd->ipath_pd[0]->port_rcvhdrq_size,
			dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);

		dd->ipath_egrtidbase = NULL;
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do for portcnt, because that's what we allocate.
	 * We acquire the lock to be really paranoid that ipath_pd isn't
	 * being accessed from some interrupt-related code (that should
	 * not happen, but best to be sure).
	 */
	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	tmp = dd->ipath_pd;
	dd->ipath_pd = NULL;
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = tmp[port];
		tmp[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(tmp);
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

	/*
	 * disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	ipath_shutdown_device(dd);

	flush_workqueue(ib_wq);

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	/*
	 * turn off rcv, send, and interrupts for all ports, and free
	 * the irq if one was registered (it may never have been, if
	 * the BIOS gave us irq 0).
	 */
	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);

	/*
	 * we check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->ipath_f_cleanup)
		/* clean up chip-specific stuff */
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);

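/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers.  Used when they might be armed, but
 * not triggered.  Used at init to ensure buffer state, and also on
 * user process close, in case the process died while writing to a
 * PIO buffer.
 */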
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	unsigned long flags;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	for (i = first; i < last; i++) {
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		/*
		 * The disarm-related bits are write-only, so it
		 * is ok to OR them in with our copy of sendctrl
		 * while we hold the lock.
		 */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
		/* read scratch to flush the write to the chip */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}

	/* on some older chips, update may not happen after cancel */
	ipath_force_pio_avail_update(dd);
}

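/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * ipath_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT.  state can have multiple states set, for any of several
 * transitions.
 */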
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
			     char *buf, size_t blen)
{
	static const struct {
		ipath_err_t err;
		const char *msg;
	} errs[] = {
		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
	};
	int i;
	int expected;
	size_t bidx = 0;

	for (i = 0; i < ARRAY_SIZE(errs); i++) {
		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
		if ((err & errs[i].err) && !expected)
			bidx += snprintf(buf + bidx, blen - bidx,
					 "%s ", errs[i].msg);
	}
}

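/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */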
int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
		     ipath_err_t err)
{
	int iserr = 1;
	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0; /* if only packet errors. */
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				/* clear bit so the CRC check below
				 * doesn't report it a second time */
				err &= ~INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
			!(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
	if (err & INFINIPATH_E_SDMAERRS)
		decode_sdma_errs(dd, err, buf, blen);
	if (err & INFINIPATH_E_INVALIDEEPCMD)
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}

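/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output message buffer
 * @len: the length of the output message buffer
 */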
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* if no errors, and so don't need to check what's first */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}

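/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */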
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}

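/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff GFP mask
 */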
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * Only fully supported way to handle this is to allocate lots
	 * extra, align as needed, and then do skb_reserve().  That wastes
	 * a lot of memory...
	 */

	/*
	 * We need 2 extra bytes for ipath_ether data sent in the
	 * key header.  In order to keep everything dword aligned,
	 * we'll reserve 4 bytes.
	 */
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* We need a 2KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
		 */
		len += 2047;
	}

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}

bail:
	return skb;
}

static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     __le32 *rhf_addr,
			     struct ipath_message_header *hdr)
{
	char emsg[128];

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type(rhf_addr),
		   ipath_hdrget_length_in_bytes(rhf_addr),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	/* Count local link integrity errors. */
	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}

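/*
 * ipath_kreceive - receive a packet
 * @pd: the infinipath port
 *
 * called from interrupt handler for errors or receive interrupt
 */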
void ipath_kreceive(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;	/* better than a double */
	int last;

	l = pd->port_head;
	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
		u32 seq = ipath_hdrget_seq(rhf_addr);

		if (seq != pd->port_seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = ipath_get_rcvhdrtail(pd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();	/* prevent speculative reads of dma'ed hdrq */
	}

reloop:
	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
		eflags = ipath_hdrget_err_flags(rhf_addr);
		etype = ipath_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
		    ipath_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			/*
			 * It turns out that the chip uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not.  So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ipath_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */
		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
		    etype != RCVHQ_RCV_TYPE_ERROR &&
		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION)
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, opcode, qp, tlen);
		} else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) >> 24);
		else {
			/*
			 * error packet, type of error unknown.
			 * Probably type 3, but we don't know, so don't
			 * even try to print the opcode, etc.
			 * Usually caused by a "bad packet", that has no
			 * BTH, when the LRH says it should.
			 */
			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
				   " %x, len %x hdrq+%x rhf: %Lx\n",
				   etail, tlen, l, (unsigned long long)
				   le64_to_cpu(*(__le64 *) rhf_addr));
			if (ipath_debug & __IPATH_ERRPKTDBG) {
				u32 j, *d, dw = rsize-2;
				if (rsize > (tlen>>2))
					dw = tlen>>2;
				d = (u32 *)hdr;
				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
					dw);
				for (j = 0; j < dw; j++)
					printk(KERN_DEBUG "%8x%s", d[j],
						(j%8) == 7 ? "\n" : " ");
				printk(KERN_DEBUG ".\n");
			}
		}
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		rhf_addr = (__le32 *) pd->port_rcvhdrq +
			l + dd->ipath_rhf_offset;
		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
			u32 seq = ipath_hdrget_seq(rhf_addr);

			if (++pd->port_seq_cnt > 13)
				pd->port_seq_cnt = 1;
			if (seq != pd->port_seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * update head regs on last packet, and every 16 packets.
		 * Reduce bus traffic, while still trying to prevent
		 * rcvhdrq overflows, for when the queue is nearly full
		 */
		if (last || !(i & 0xf)) {
			u64 lval = l;

			/* request IBA6120 interrupt only on last */
			if (last)
				lval |= dd->ipath_rhdrhead_intr_off;
			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
				pd->port_port);
			if (updegr) {
				ipath_write_ureg(dd, ur_rcvegrindexhead,
					etail, pd->port_port);
				updegr = 0;
			}
		}
	}

	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		/* IBA6110 workaround; we can have a race clearing chip
		 * interrupt with another interrupt about to be delivered,
		 * and can clear it before it is delivered on the GPIO
		 * workaround.  By doing the extra check here for the
		 * in-memory tail register updating while we were doing
		 * earlier packets, we "almost" guarantee we have covered
		 * that case.
		 */
		u32 hqtail = ipath_get_rcvhdrtail(pd);
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1;	/* loop 1 extra time at most */
			goto reloop;
		}
	}

	pkttot += i;

	pd->port_head = l;

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}

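/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability
 * register map
 * @dd: the infinipath device
 */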
static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	int i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	/* If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomicly, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpu's simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpu's can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* only if packet debug and verbose */
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;
		/*
		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
		 */
		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
		else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = dd->ipath_pioavailkernel[i] &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

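/*
 * used to force update of pioavailshadow if we can't get a pio buffer.
 * Needed primarily due to exiting freeze mode after recovering
 * from errors.  Done lazily, because it's safer (known to not
 * be writing pio buffers).
 */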
static void ipath_reset_availshadow(struct ipath_devdata *dd)
{
	int i, im;
	unsigned long flags;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < dd->ipath_pioavregs; i++) {
		u64 val, oldval;

		/* deal with 6110 chip bug on high register number */
		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
			i ^ 1 : i;
		val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
		/*
		 * busy out the buffers not in the kernel avail list,
		 * without changing the generation bits.
		 */
		oldval = dd->ipath_pioavailshadow[i];
		dd->ipath_pioavailshadow[i] = val |
			((~dd->ipath_pioavailkernel[i] <<
			INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
		if (oldval != dd->ipath_pioavailshadow[i])
			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
				i, (unsigned long long) oldval,
				dd->ipath_pioavailshadow[i]);
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}

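/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * called from user init code, and also layered driver init
 */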
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}
	return ret;
}

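/* debugging code and stats updates if no pio buffers available. */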
static noinline void no_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long *shadow = dd->ipath_pioavailshadow;
	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;

	dd->ipath_upd_pio_shadow = 1;

	/*
	 * not atomic, but if we lose a stat count in a while, that's OK
	 */
	ipath_stats.sps_nopiobufs++;
	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
		ipath_force_pio_avail_update(dd); /* at start */
		ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
			"%llx %llx %llx %llx\n"
			"ipath shadow: %lx %lx %lx %lx\n",
			dd->ipath_consec_nopiobuf,
			(unsigned long)get_cycles(),
			(unsigned long long) le64_to_cpu(dma[0]),
			(unsigned long long) le64_to_cpu(dma[1]),
			(unsigned long long) le64_to_cpu(dma[2]),
			(unsigned long long) le64_to_cpu(dma[3]),
			shadow[0], shadow[1], shadow[2], shadow[3]);
		/*
		 * The first four shadow words cover 128 buffers (2 bits
		 * per buffer); only print the second group if configured
		 * with more buffers than that.
		 */
		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
		    (sizeof(shadow[0]) * 4 * 4))
			ipath_dbg("2nd group: dmacopy: "
				  "%llx %llx %llx %llx\n"
				  "ipath shadow: %lx %lx %lx %lx\n",
				  (unsigned long long)le64_to_cpu(dma[4]),
				  (unsigned long long)le64_to_cpu(dma[5]),
				  (unsigned long long)le64_to_cpu(dma[6]),
				  (unsigned long long)le64_to_cpu(dma[7]),
				  shadow[4], shadow[5], shadow[6], shadow[7]);

		/* re-sync the shadow with the DMA'ed copy */
		ipath_reset_availshadow(dd);
	}
}

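/*
 * common code for normal driver pio buffer allocation, and reserved
 * allocation.
 *
 * do appropriate marking as busy, etc.
 * returns buffer pointer if one is found, otherwise NULL.
 */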
static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
	u32 *pbufnum, u32 first, u32 last, u32 firsti)
{
	int i, j, updated = 0;
	unsigned piobcnt;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = last - first;
	if (dd->ipath_upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid
		 */
		ipath_update_pio_bufs(dd);
		updated++;
		i = first;
	} else
		i = firsti;
rescan:
	/*
	 * while test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (j = 0; j < piobcnt; j++, i++) {
		if (i >= last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		__change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == piobcnt) {
		if (!updated) {
			/*
			 * first time through; shadow exhausted, but may be
			 * buffers available, try an update and then rescan.
			 */
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		} else if (updated == 1 && piobcnt <=
			((dd->ipath_sendctrl
			>> INFINIPATH_S_UPDTHRESH_SHIFT) &
			INFINIPATH_S_UPDTHRESH_MASK)) {
			/*
			 * for chips supporting and using the update
			 * threshold we need to force an update of the
			 * in-memory copy if the count is less than the
			 * threshold, then check one more time.
			 */
			ipath_force_pio_avail_update(dd);
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}

		no_pio_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->ipath_piobcnt2k)
			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
					       i * dd->ipath_palign);
		else
			buf = (u32 __iomem *)
				(dd->ipath_pio4kbase +
				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
		if (pbufnum)
			*pbufnum = i;
	}

	return buf;
}

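/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @plen: the size of the PIO buffer needed in 32-bit words
 * @pbufnum: the buffer number is placed here
 */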
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
{
	u32 __iomem *buf;
	u32 pnum, nbufs;
	u32 first, lasti;

	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
		first = dd->ipath_piobcnt2k;
		lasti = dd->ipath_lastpioindexl;
	} else {
		first = 0;
		lasti = dd->ipath_lastpioindex;
	}
	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);

	if (buf) {
		/*
		 * Set next starting place.  It's just an optimization,
		 * it doesn't matter who wins on this, so no locking
		 */
		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
			dd->ipath_lastpioindexl = pnum + 1;
		else
			dd->ipath_lastpioindex = pnum + 1;
		if (dd->ipath_upd_pio_shadow)
			dd->ipath_upd_pio_shadow = 0;
		if (dd->ipath_consec_nopiobuf)
			dd->ipath_consec_nopiobuf = 0;
		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
		if (pbufnum)
			*pbufnum = pnum;
	}
	return buf;
}

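/**
 * ipath_chg_pioavailkernel - change which send buffers are available for kernel
 * @dd: the infinipath device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 */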
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
			      unsigned len, int avail)
{
	unsigned long flags;
	unsigned end, cnt = 0;

	/* There are two bits per send buffer (busy and generation) */
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	/* Set or clear the busy bit in the shadow. */
	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i, im;
			/*
			 * the BUSY bit will never be set, because we disarm
			 * the user buffers before we hand them back to the
			 * kernel.  We do have to make sure the generation
			 * bit is set correctly in shadow, since it could
			 * have changed many times while allocated to user.
			 * We can't use the bitmap functions on the full
			 * dma array because it is always little-endian, so
			 * we have to flip to host-order first.
			 */
			i = start / BITS_PER_LONG;
			im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
				i ^ 1 : i;
			__clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
				+ start, dd->ipath_pioavailshadow);
			dma = (unsigned long) le64_to_cpu(
				dd->ipath_pioavailregs_dma[im]);
			if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
				+ start) % BITS_PER_LONG, &dma))
				__set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					+ start, dd->ipath_pioavailshadow);
			else
				__clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					+ start, dd->ipath_pioavailshadow);
			__set_bit(start, dd->ipath_pioavailkernel);
		} else {
			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
				dd->ipath_pioavailshadow);
			__clear_bit(start, dd->ipath_pioavailkernel);
		}
		start += 2;
	}

	if (dd->ipath_pioupd_thresh) {
		end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
		cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	/*
	 * When moving buffers from kernel to user, if the number left
	 * for the kernel drops below the current update threshold,
	 * reduce the threshold so the remaining kernel buffers still
	 * get availability updates often enough.
	 */
	if (!avail && len < cnt)
		cnt = len;
	if (cnt < dd->ipath_pioupd_thresh) {
		dd->ipath_pioupd_thresh = cnt;
		ipath_dbg("Decreased pio update threshold to %u\n",
			dd->ipath_pioupd_thresh);
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
			<< INFINIPATH_S_UPDTHRESH_SHIFT);
		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
			<< INFINIPATH_S_UPDTHRESH_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
}

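/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * this must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */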
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
			 struct ipath_portdata *pd)
{
	int ret = 0;

	if (!pd->port_rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
				sizeof(u32), PAGE_SIZE);

		pd->port_rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
			gfp_flags);

		if (!pd->port_rcvhdrq) {
			ipath_dev_err(dd, "attempt to allocate %d bytes "
				      "for port %u rcvhdrq failed\n",
				      amt, pd->port_port);
			ret = -ENOMEM;
			goto bail;
		}

		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				GFP_KERNEL);
			if (!pd->port_rcvhdrtail_kvaddr) {
				ipath_dev_err(dd, "attempt to allocate 1 page "
					"for port %u rcvhdrqtailaddr "
					"failed\n", pd->port_port);
				ret = -ENOMEM;
				dma_free_coherent(&dd->pcidev->dev, amt,
					pd->port_rcvhdrq,
					pd->port_rcvhdrq_phys);
				pd->port_rcvhdrq = NULL;
				goto bail;
			}
			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
				   "physical\n", pd->port_port,
				   (unsigned long long) phys_hdrqtail);
		}

		pd->port_rcvhdrq_size = amt;

		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
			   "for port %u rcvhdr Q\n",
			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_phys,
			   (unsigned long) pd->port_rcvhdrq_size,
			   pd->port_port);
	} else
		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
			   "hdrtailaddr@%p %llx physical\n",
			   pd->port_port, pd->port_rcvhdrq,
			   (unsigned long long) pd->port_rcvhdrq_phys,
			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
			   pd->port_rcvhdrqtailaddr_phys);

	/* clear for security and sanity on each use */
	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
	if (pd->port_rcvhdrtail_kvaddr)
		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);

	/*
	 * tell chip each time we init it, even if we are re-using previous
	 * memory (we zero the register at process close)
	 */
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      pd->port_port, pd->port_rcvhdrq_phys);

bail:
	return ret;
}

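/*
 * Flush all sends that might be in the ready to send state, as well as any
 * that are in the process of being sent.  Used whenever we need to be
 * sure the send side is idle.  Cleans up all buffer state by canceling
 * all pio buffers, and issuing an abort, which cleans up anything in the
 * sdma engine, and flushes the sdma queue if sdma is in use.
 */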
void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
{
	unsigned long flags;

	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
		goto bail;
	}
	/*
	 * If we have SDMA, and it's not disabled, we have to kick off the
	 * abort state machine, provided we aren't already aborting.
	 * If we are in the process of aborting SDMA (!DISABLED, but
	 * ABORTING), we don't want to wait again, we'll skip the cancel.
	 */
	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
		int skip_cancel;
		unsigned long *statp = &dd->ipath_sdma_status;

		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		skip_cancel =
			test_and_set_bit(IPATH_SDMA_ABORTING, statp)
			&& !test_bit(IPATH_SDMA_DISABLED, statp);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
		if (skip_cancel)
			goto bail;
	}

	ipath_dbg("Cancelling all in-progress send buffers\n");

	/* skip armlaunch errs for a while */
	dd->ipath_lastcancel = jiffies + HZ / 2;

	/*
	 * The abort bit is auto-clearing.  We also don't want pioavail
	 * update happening during this, and we don't want any other
	 * sends going out, so turn those off for the duration.  We read
	 * the scratch register to be sure that cancels and the abort
	 * have taken effect in the chip.  Otherwise it is possible that
	 * a PIO can still trigger.
	 */
	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
		| INFINIPATH_S_PIOENABLE);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl | INFINIPATH_S_ABORT);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	/* disarm all send buffers */
	ipath_disarm_piobufs(dd, 0,
		dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);

	if (restore_sendctrl) {
		/* else done by caller later if needed */
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
			INFINIPATH_S_PIOENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		/* and again, be sure all have hit the chip */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}

	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		/* only wait so long for intr */
		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
		dd->ipath_sdma_reset_wait = 200;
		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	}
bail:;
}

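/*
 * Force an update of in-memory copy of the pioavail registers, when
 * needed for any of a variety of reasons.  We read the scratch register
 * to make it highly likely that the update will have happened by the
 * time we return.  If already off (as in cancel_sends above), this
 * routine is a nop, on the assumption that the caller will "do the
 * right thing".
 */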
void ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	}
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}

static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
				int linitcmd)
{
	u64 mod_wd;
	static const char *what[4] = {
		[0] = "NOP",
		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
	};

	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
		/*
		 * If we are told to disable, note that so link-recovery
		 * code does not attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
		preempt_enable();
	} else if (linitcmd) {
		/*
		 * Any other linkinitcmd will lead to LINKDOWN and then
		 * to INIT (if all is well), so clear flag to let
		 * link-recovery code attempt to bring us back up.
		 */
		preempt_disable();
		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
		preempt_enable();
	}

	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
	ipath_cdbg(VERBOSE,
		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
		dd->ipath_unit, what[linkcmd], linitcmd,
		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl | mod_wd);
	/* read from chip so write is flushed */
	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
}

int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN_ONLY:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
					INFINIPATH_IBCC_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);

		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
		lstate = IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINK_LOOPBACK:
		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);

		/* turn heartbeat off, as it causes loopback to fail */
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_OFF);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINK_EXTERNAL:
		dev_info(&dd->pcidev->dev,
			 "Disabling IB local loopback (normal)\n");
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_ON);
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		/* don't wait */
		ret = 0;
		goto bail;

	/*
	 * Heartbeat can be explicitly enabled by the user via
	 * "hrtbt_enable" "file", and if disabled, trying to enable here
	 * will have no effect.  Implicit changes (heartbeat off when
	 * loopback on, and vice versa) are included to ease testing.
	 */
	case IPATH_IB_LINK_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
			IPATH_IB_HRTBT_ON);
		goto bail;

	case IPATH_IB_LINK_NO_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
			IPATH_IB_HRTBT_OFF);
		goto bail;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}

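/**
 * ipath_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * we can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */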
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    (arg != 4096 || !ipath_mtu4096)) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
				piosize = dd->ipath_init_ibmaxlen;
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		u64 ibc = dd->ipath_ibcctrl, ibdw;
		/*
		 * update our housekeeping variables, and set IBC max
		 * size, same as init code; max IBC is max we allow in
		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
		 */
		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 dd->ibcc_mpl_shift);
		ibc |= ibdw << dd->ibcc_mpl_shift;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
{
	dd->ipath_lid = lid;
	dd->ipath_lmc = lmc;

	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
		(~((1U << lmc) - 1)) << 16);

	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);

	return 0;
}

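/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 */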
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}

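/*
 * The following deal with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status.  The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */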
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void ipath_run_led_override(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
	int timeoff;
	int pidx;
	u64 lstate, ltstate, val;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	pidx = dd->ipath_led_override_phase++ & 1;
	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
	timeoff = dd->ipath_led_override_timeoff;

	/*
	 * below potentially restores the LED values per current status,
	 * should also possibly setup the traffic-blink register,
	 * but leave that to per-chip functions.
	 */
	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	ltstate = ipath_ib_linktrstate(dd, val);
	lstate = ipath_ib_linkstate(dd, val);

	dd->ipath_f_setextled(dd, lstate, ltstate);
	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
}

void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
{
	int timeoff, freq;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = val & 0xF;
	}
	dd->ipath_led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our
	 * request.
	 */
	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&dd->ipath_led_override_timer);
		dd->ipath_led_override_timer.function =
			ipath_run_led_override;
		dd->ipath_led_override_timer.data = (unsigned long) dd;
		dd->ipath_led_override_timer.expires = jiffies + 1;
		add_timer(&dd->ipath_led_override_timer);
	} else
		atomic_dec(&dd->ipath_led_override_timer_active);
}

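/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by ipath_init_chip(dd, 1)
 */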
void ipath_shutdown_device(struct ipath_devdata *dd)
{
        unsigned long flags;

        ipath_dbg("Shutting down the device\n");

        ipath_hol_up(dd); /* make sure user processes aren't suspended */

        dd->ipath_flags |= IPATH_LINKUNK;
        dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
                             IPATH_LINKINIT | IPATH_LINKARMED |
                             IPATH_LINKACTIVE);
        *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
                                IPATH_STATUS_IB_READY);

        /* mask interrupts, but not errors */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

        dd->ipath_rcvctrl = 0;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);

        if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
                teardown_sdma(dd);

        /*
         * Gracefully stop all sends, allowing any in progress to
         * trickle out first.
         */
        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        dd->ipath_sendctrl = 0;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        /* flush it */
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

        /*
         * Enough for anything that's going to trickle out to have
         * actually done so.
         */
        udelay(5);

        dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */

        ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
        ipath_cancel_sends(dd, 0);

        /*
         * We are shutting down, so tell components that care.  We don't
         * do this on just a link state change; much like ethernet, a
         * cable unplug doesn't change driver state.
         */
        signal_ib_event(dd, IB_EVENT_PORT_ERR);

        /* disable IBC */
        dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
                         dd->ipath_control | INFINIPATH_C_FREEZEMODE);

        /*
         * Quiet the serdes here, because we are unloading and can't
         * count on interrupts to move things along.
         */
        dd->ipath_f_quiet_serdes(dd);

        /* stop all the timers that might still be running */
        del_timer_sync(&dd->ipath_hol_timer);
        if (dd->ipath_stats_timer_active) {
                del_timer_sync(&dd->ipath_stats_timer);
                dd->ipath_stats_timer_active = 0;
        }
        if (dd->ipath_intrchk_timer.data) {
                del_timer_sync(&dd->ipath_intrchk_timer);
                dd->ipath_intrchk_timer.data = 0;
        }
        if (atomic_read(&dd->ipath_led_override_timer_active)) {
                del_timer_sync(&dd->ipath_led_override_timer);
                atomic_set(&dd->ipath_led_override_timer_active, 0);
        }

        /*
         * Clear all interrupts and errors, so that the next time the
         * driver is loaded or the device is enabled, we know that
         * whatever is set happened while we were unloaded.
         */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
                         ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

        ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
        ipath_update_eeprom_log(dd);
}

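/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * Frees any allocated data for a port.  This should not touch anything
 * that would affect a simultaneous re-allocation of port data, because
 * it is called after ipath_mutex is released (and can be called from
 * reinit as well).  It should never change any chip state or global
 * driver state.
 */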
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
        if (!pd)
                return;

        if (pd->port_rcvhdrq) {
                ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
                           "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
                           (unsigned long) pd->port_rcvhdrq_size);
                dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
                                  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
                pd->port_rcvhdrq = NULL;
                if (pd->port_rcvhdrtail_kvaddr) {
                        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                          pd->port_rcvhdrtail_kvaddr,
                                          pd->port_rcvhdrqtailaddr_phys);
                        pd->port_rcvhdrtail_kvaddr = NULL;
                }
        }
        if (pd->port_port && pd->port_rcvegrbuf) {
                unsigned e;

                for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
                        void *base = pd->port_rcvegrbuf[e];
                        size_t size = pd->port_rcvegrbuf_size;

                        ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
                                   "chunk %u/%u\n", base,
                                   (unsigned long) size,
                                   e, pd->port_rcvegrbuf_chunks);
                        dma_free_coherent(&dd->pcidev->dev, size,
                                          base, pd->port_rcvegrbuf_phys[e]);
                }
                kfree(pd->port_rcvegrbuf);
                pd->port_rcvegrbuf = NULL;
                kfree(pd->port_rcvegrbuf_phys);
                pd->port_rcvegrbuf_phys = NULL;
                pd->port_rcvegrbuf_chunks = 0;
        } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
                unsigned e;
                struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;

                dd->ipath_port0_skbinfo = NULL;
                ipath_cdbg(VERBOSE, "free closed port %d "
                           "ipath_port0_skbinfo @ %p\n", pd->port_port,
                           skbinfo);
                for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
                        if (skbinfo[e].skb) {
                                pci_unmap_single(dd->pcidev, skbinfo[e].phys,
                                                 dd->ipath_ibmaxlen,
                                                 PCI_DMA_FROMDEVICE);
                                dev_kfree_skb(skbinfo[e].skb);
                        }
                vfree(skbinfo);
        }
        kfree(pd->port_tid_pg_list);
        vfree(pd->subport_uregbase);
        vfree(pd->subport_rcvegrbuf);
        vfree(pd->subport_rcvhdr_base);
        kfree(pd);
}

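/*
 * Module entry point: set up the unit-number IDR, register the PCI
 * driver, and create the ipathfs filesystem, unwinding on any failure.
 */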
static int __init infinipath_init(void)
{
        int ret;

        if (ipath_debug & __IPATH_DBG)
                printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

        /*
         * These must be called before the driver is registered with
         * the PCI subsystem.
         */
        idr_init(&unit_table);
        if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
                printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
                ret = -ENOMEM;
                goto bail;
        }

        ret = pci_register_driver(&ipath_driver);
        if (ret < 0) {
                printk(KERN_ERR IPATH_DRV_NAME
                       ": Unable to register driver: error %d\n", -ret);
                goto bail_unit;
        }

        ret = ipath_init_ipathfs();
        if (ret < 0) {
                printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
                       "ipathfs: error %d\n", -ret);
                goto bail_pci;
        }

        goto bail;

bail_pci:
        pci_unregister_driver(&ipath_driver);

bail_unit:
        idr_destroy(&unit_table);

bail:
        return ret;
}

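/*
 * Module exit point: tear down in the reverse order of infinipath_init.
 */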
static void __exit infinipath_cleanup(void)
{
        ipath_exit_ipathfs();

        ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
        pci_unregister_driver(&ipath_driver);

        idr_destroy(&unit_table);
}

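/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the
 * chip (i.e., much like a driver unload/reload).  We clear the INITTED
 * flag so that the various entry points will fail until we reinitialize.
 * For now, we only allow this if no user ports are open that use chip
 * resources.
 */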
int ipath_reset_device(int unit)
{
        int ret, i;
        struct ipath_devdata *dd = ipath_lookup(unit);
        unsigned long flags;

        if (!dd) {
                ret = -ENODEV;
                goto bail;
        }

        if (atomic_read(&dd->ipath_led_override_timer_active)) {
                /* Need to stop LED timer, _then_ shut off LEDs */
                del_timer_sync(&dd->ipath_led_override_timer);
                atomic_set(&dd->ipath_led_override_timer_active, 0);
        }

        /* Shut off LEDs after we are sure timer is not running */
        dd->ipath_led_override = LED_OVER_BOTH_OFF;
        dd->ipath_f_setextled(dd, 0, 0);

        dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

        if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
                dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
                         "not initialized or not present\n", unit);
                ret = -ENXIO;
                goto bail;
        }

        spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
        if (dd->ipath_pd)
                for (i = 1; i < dd->ipath_cfgports; i++) {
                        if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
                                continue;
                        spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
                        ipath_dbg("unit %u port %d is in use "
                                  "(PID %u cmd %s), can't reset\n",
                                  unit, i,
                                  pid_nr(dd->ipath_pd[i]->port_pid),
                                  dd->ipath_pd[i]->port_comm);
                        ret = -EBUSY;
                        goto bail;
                }
        spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

        if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
                teardown_sdma(dd);

        dd->ipath_flags &= ~IPATH_INITTED;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
        ret = dd->ipath_f_reset(dd);
        if (ret == 1) {
                ipath_dbg("Reinitializing unit %u after reset attempt\n",
                          unit);
                ret = ipath_init_chip(dd, 1);
        } else
                ret = -EAGAIN;
        if (ret)
                ipath_dev_err(dd, "Reinitialize unit %u after "
                              "reset failed with %d\n", unit, ret);
        else
                dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
                         "resetting\n", unit);

bail:
        return ret;
}

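/*
 * Send a signal to all the processes that have the driver open through
 * the normal interfaces (i.e., everything other than the diags
 * interface).  Returns the number of signalled processes.
 */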
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
{
        int i, sub, any = 0;
        struct pid *pid;
        unsigned long flags;

        if (!dd->ipath_pd)
                return 0;

        spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
        for (i = 1; i < dd->ipath_cfgports; i++) {
                if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
                        continue;
                pid = dd->ipath_pd[i]->port_pid;
                if (!pid)
                        continue;

                dev_info(&dd->pcidev->dev, "context %d in use "
                         "(PID %u), sending signal %d\n",
                         i, pid_nr(pid), sig);
                kill_pid(pid, sig, 1);
                any++;
                for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
                        pid = dd->ipath_pd[i]->port_subpid[sub];
                        if (!pid)
                                continue;
                        dev_info(&dd->pcidev->dev, "sub-context "
                                 "%d:%d in use (PID %u), sending "
                                 "signal %d\n", i, sub, pid_nr(pid), sig);
                        kill_pid(pid, sig, 1);
                        any++;
                }
        }
        spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
        return any;
}

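/*
 * Suspend (SIGSTOP) any user processes using the device and flush
 * pending sends, so they cannot head-of-line block the hardware.
 */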
static void ipath_hol_signal_down(struct ipath_devdata *dd)
{
        if (ipath_signal_procs(dd, SIGSTOP))
                ipath_dbg("Stopped some processes\n");
        ipath_cancel_sends(dd, 1);
}

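/* Resume (SIGCONT) any user processes that were suspended */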
static void ipath_hol_signal_up(struct ipath_devdata *dd)
{
        if (ipath_signal_procs(dd, SIGCONT))
                ipath_dbg("Continued some processes\n");
}

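/*
 * The link is down: stop any user processes and flush pending sends to
 * prevent HoL blocking, then start the HoL timer that periodically
 * continues, then stops, the processes, so they can detect link down
 * if they want to and do something about it.
 * The timer may already be running, so use mod_timer, not add_timer.
 */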
void ipath_hol_down(struct ipath_devdata *dd)
{
        dd->ipath_hol_state = IPATH_HOL_DOWN;
        ipath_hol_signal_down(dd);
        dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
        dd->ipath_hol_timer.expires = jiffies +
                msecs_to_jiffies(ipath_hol_timeout_ms);
        mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
}

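/*
 * The link is up: continue any suspended user processes.  The timer is
 * left running, if set; it becomes a no-op once it sees the link is up.
 */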
void ipath_hol_up(struct ipath_devdata *dd)
{
        ipath_hol_signal_up(dd);
        dd->ipath_hol_state = IPATH_HOL_UP;
}

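/*
 * Toggle the running/not-running state of user processes to prevent
 * HoL blocking on chip resources, while still allowing the processes
 * to do their own special-case handling of a link going down.
 * Should only be called via the timer.
 */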
void ipath_hol_event(unsigned long opaque)
{
        struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

        if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
            && dd->ipath_hol_state != IPATH_HOL_UP) {
                dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
                ipath_dbg("Stopping processes\n");
                ipath_hol_signal_down(dd);
        } else {
                dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
                ipath_dbg("Continuing processes\n");
                ipath_hol_signal_up(dd);
        }
        if (dd->ipath_hol_state == IPATH_HOL_UP)
                ipath_dbg("link's up, don't resched timer\n");
        else {
                dd->ipath_hol_timer.expires = jiffies +
                        msecs_to_jiffies(ipath_hol_timeout_ms);
                mod_timer(&dd->ipath_hol_timer,
                          dd->ipath_hol_timer.expires);
        }
}

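/*
 * Update the XGXS receive-polarity-inversion setting if it has changed.
 * Returns 0 on success, -1 if new_pol_inv is out of range.
 */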
int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
        u64 val;

        if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
                return -1;
        if (dd->ipath_rx_pol_inv != new_pol_inv) {
                dd->ipath_rx_pol_inv = new_pol_inv;
                val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
                val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
                         INFINIPATH_XGXS_RX_POL_SHIFT);
                val |= ((u64)dd->ipath_rx_pol_inv) <<
                        INFINIPATH_XGXS_RX_POL_SHIFT;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
        }
        return 0;
}

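/*
 * Disable and then re-enable the armlaunch error check.  Used for PIO
 * bandwidth testing, where checking is count-based rather than
 * trigger-based.  Safe for the driver's own use at init time; not
 * completely safe when used for user-mode checking, since some error
 * checking can be lost, but not particularly risky.  There is no
 * reference counting, which is also fine given the intended use.
 */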
void ipath_enable_armlaunch(struct ipath_devdata *dd)
{
        dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
                         INFINIPATH_E_SPIOARMLAUNCH);
        dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
                         dd->ipath_errormask);
}

void ipath_disable_armlaunch(struct ipath_devdata *dd)
{
        /* also clear it from the masked-error set, so it stays disabled */
        dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
        dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
                         dd->ipath_errormask);
}

module_init(infinipath_init);
module_exit(infinipath_cleanup);