/*
 * QLogic InfiniPath driver: core device probe and teardown, receive
 * processing, PIO send-buffer management, and IB link-state handling.
 * Licensed under the GPL (see MODULE_LICENSE below).
 */
34#include <linux/sched.h>
35#include <linux/spinlock.h>
36#include <linux/idr.h>
37#include <linux/pci.h>
38#include <linux/io.h>
39#include <linux/delay.h>
40#include <linux/netdevice.h>
41#include <linux/vmalloc.h>
42
43#include "ipath_kernel.h"
44#include "ipath_verbs.h"
45
46static void ipath_update_pio_bufs(struct ipath_devdata *);
47
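/*
 * Note: returns a pointer to a single static buffer, so the result is
 * only valid until the next call; callers use it immediately in
 * printk-style messages.
 */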
48const char *ipath_get_unit_name(int unit)
49{
50 static char iname[16];
51 snprintf(iname, sizeof iname, "infinipath%u", unit);
52 return iname;
53}
54
55#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
56#define PFX IPATH_DRV_NAME ": "
57
62const char ib_ipath_version[] = IPATH_IDSTR "\n";
63
64static struct idr unit_table;
65DEFINE_SPINLOCK(ipath_devs_lock);
66LIST_HEAD(ipath_dev_list);
67
68wait_queue_head_t ipath_state_wait;
69
70unsigned ipath_debug = __IPATH_INFO;
71
72module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
73MODULE_PARM_DESC(debug, "mask for debug prints");
74EXPORT_SYMBOL_GPL(ipath_debug);
75
76unsigned ipath_mtu4096 = 1;
77module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
78MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
79
80static unsigned ipath_hol_timeout_ms = 13000;
81module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
82MODULE_PARM_DESC(hol_timeout_ms,
83 "duration of user app suspension after link failure");
84
85unsigned ipath_linkrecovery = 1;
86module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
87MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
88
89MODULE_LICENSE("GPL");
90MODULE_AUTHOR("QLogic <support@qlogic.com>");
91MODULE_DESCRIPTION("QLogic InfiniPath driver");
92
/*
 * Human-readable names for the link training/state field of the
 * IBCStatus register (indexed with dd->ibcs_lts_mask).
 */
97const char *ipath_ibcstatus_str[] = {
98 "Disabled",
99 "LinkUp",
100 "PollActive",
101 "PollQuiet",
102 "SleepDelay",
103 "SleepQuiet",
104 "LState6",
105 "LState7",
106 "CfgDebounce",
107 "CfgRcvfCfg",
108 "CfgWaitRmt",
109 "CfgIdle",
110 "RecovRetrain",
111 "CfgTxRevLane",
112 "RecovWaitRmt",
113 "RecovIdle",
114
115 "CfgEnhanced",
116 "CfgTest",
117 "CfgWaitRmtTest",
118 "CfgWaitCfgEnhanced",
119 "SendTS_T",
120 "SendTstIdles",
121 "RcvTS_T",
122 "SendTst_TS1s",
123 "LTState18", "LTState19", "LTState1A", "LTState1B",
124 "LTState1C", "LTState1D", "LTState1E", "LTState1F"
125};
126
127static void __devexit ipath_remove_one(struct pci_dev *);
128static int __devinit ipath_init_one(struct pci_dev *,
129 const struct pci_device_id *);
132#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
133#define PCI_VENDOR_ID_QLOGIC 0x1077
134#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
135#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
136#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
139#define STATUS_TIMEOUT 60
140
141static const struct pci_device_id ipath_pci_tbl[] = {
142 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
143 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
144 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
145 { 0, }
146};
147
148MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
149
150static struct pci_driver ipath_driver = {
151 .name = IPATH_DRV_NAME,
152 .probe = ipath_init_one,
153 .remove = __devexit_p(ipath_remove_one),
154 .id_table = ipath_pci_tbl,
155 .driver = {
156 .groups = ipath_driver_attr_groups,
157 },
158};
159
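/*
 * Read BAR0/BAR1 from config space before enabling the device, so the
 * "BARs were cleared by a chip reset" case can be detected and handled
 * in ipath_init_one() below.
 */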
160static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
161 u32 *bar0, u32 *bar1)
162{
163 int ret;
164
165 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
166 if (ret)
167 ipath_dev_err(dd, "failed to read bar0 before enable: "
168 "error %d\n", -ret);
169
170 ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
171 if (ret)
172 ipath_dev_err(dd, "failed to read bar1 before enable: "
173 "error %d\n", -ret);
174
175 ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
176}
177
178static void ipath_free_devdata(struct pci_dev *pdev,
179 struct ipath_devdata *dd)
180{
181 unsigned long flags;
182
183 pci_set_drvdata(pdev, NULL);
184
185 if (dd->ipath_unit != -1) {
186 spin_lock_irqsave(&ipath_devs_lock, flags);
187 idr_remove(&unit_table, dd->ipath_unit);
188 list_del(&dd->ipath_list);
189 spin_unlock_irqrestore(&ipath_devs_lock, flags);
190 }
191 vfree(dd);
192}
193
194static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
195{
196 unsigned long flags;
197 struct ipath_devdata *dd;
198 int ret;
199
200 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
201 dd = ERR_PTR(-ENOMEM);
202 goto bail;
203 }
204
205 dd = vmalloc(sizeof(*dd));
206 if (!dd) {
207 dd = ERR_PTR(-ENOMEM);
208 goto bail;
209 }
210 memset(dd, 0, sizeof(*dd));
211 dd->ipath_unit = -1;
212
213 spin_lock_irqsave(&ipath_devs_lock, flags);
214
215 ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
216 if (ret < 0) {
217 printk(KERN_ERR IPATH_DRV_NAME
218 ": Could not allocate unit ID: error %d\n", -ret);
219 ipath_free_devdata(pdev, dd);
220 dd = ERR_PTR(ret);
221 goto bail_unlock;
222 }
223
224 dd->pcidev = pdev;
225 pci_set_drvdata(pdev, dd);
226
227 list_add(&dd->ipath_list, &ipath_dev_list);
228
229bail_unlock:
230 spin_unlock_irqrestore(&ipath_devs_lock, flags);
231
232bail:
233 return dd;
234}
235
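/*
 * Look up a unit in the IDR table; ipath_lookup() below is the locked
 * wrapper that most callers should use.
 */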
236static inline struct ipath_devdata *__ipath_lookup(int unit)
237{
238 return idr_find(&unit_table, unit);
239}
240
241struct ipath_devdata *ipath_lookup(int unit)
242{
243 struct ipath_devdata *dd;
244 unsigned long flags;
245
246 spin_lock_irqsave(&ipath_devs_lock, flags);
247 dd = __ipath_lookup(unit);
248 spin_unlock_irqrestore(&ipath_devs_lock, flags);
249
250 return dd;
251}
252
253int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
254{
255 int nunits, npresent, nup;
256 struct ipath_devdata *dd;
257 unsigned long flags;
258 int maxports;
259
260 nunits = npresent = nup = maxports = 0;
261
262 spin_lock_irqsave(&ipath_devs_lock, flags);
263
264 list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
265 nunits++;
266 if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
267 npresent++;
268 if (dd->ipath_lid &&
269 !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
270 | IPATH_LINKUNK)))
271 nup++;
272 if (dd->ipath_cfgports > maxports)
273 maxports = dd->ipath_cfgports;
274 }
275
276 spin_unlock_irqrestore(&ipath_devs_lock, flags);
277
278 if (npresentp)
279 *npresentp = npresent;
280 if (nupp)
281 *nupp = nup;
282 if (maxportsp)
283 *maxportsp = maxports;
284
285 return nunits;
286}
287
/*
 * Default (weak) versions of the write-combining hooks, used when the
 * architecture does not supply its own.  Write-combining the PIO
 * buffers is only a performance optimization, so "not supported" is a
 * safe default.
 */
294int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
295{
296 return -EOPNOTSUPP;
297}
298
299void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
300{
301}
302
/*
 * Rough sanity check of PIO-buffer write bandwidth, done once at init:
 * grab a send buffer, time a few milliseconds of back-to-back copies
 * into it, and complain if the rate is low enough to suggest that
 * write-combining is not working.
 */
316static void ipath_verify_pioperf(struct ipath_devdata *dd)
317{
318 u32 pbnum, cnt, lcnt;
319 u32 __iomem *piobuf;
320 u32 *addr;
321 u64 msecs, emsecs;
322
323 piobuf = ipath_getpiobuf(dd, 0, &pbnum);
324 if (!piobuf) {
325 dev_info(&dd->pcidev->dev,
326 "No PIObufs for checking perf, skipping\n");
327 return;
328 }
329
	/*
	 * 1 KiB per copy: well under the size of even the smaller send
	 * buffers, but enough to make the timing loop meaningful.
	 */
334 cnt = 1024;
335
336 addr = vmalloc(cnt);
337 if (!addr) {
338 dev_info(&dd->pcidev->dev,
339 "Couldn't get memory for checking PIO perf,"
340 " skipping\n");
341 goto done;
342 }
343
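	/*
	 * Disable preemption and wait for the millisecond counter to tick
	 * over, so the short timing loop below is not skewed by a partial
	 * tick or by being scheduled away.
	 */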
344 preempt_disable();
345 msecs = 1 + jiffies_to_msecs(jiffies);
346 for (lcnt = 0; lcnt < 10000U; lcnt++) {
347
348 if (jiffies_to_msecs(jiffies) >= msecs)
349 break;
350 udelay(1);
351 }
352
353 ipath_disable_armlaunch(dd);
354
	/*
	 * Write the initial PBC word for the buffer: bit 63 on chips with
	 * a PBC packet-count field, zero otherwise; then flush
	 * write-combining before starting the timed copies.
	 */
359 if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
360 writeq(1UL << 63, piobuf);
361 else
362 writeq(0, piobuf);
363 ipath_flush_wc();
364
	/*
	 * Time roughly 5 milliseconds of back-to-back copies into the
	 * buffer, starting past the PBC area.
	 */
370 msecs = jiffies_to_msecs(jiffies);
371 for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
372 __iowrite32_copy(piobuf + 64, addr, cnt >> 2);
373 emsecs = jiffies_to_msecs(jiffies) - msecs;
374 }
	/*
	 * Each iteration copied 1 KiB, so lcnt/emsecs is roughly MiB/s;
	 * warn if it is below about 1 GiB/s, which typically means
	 * write-combining is not working.
	 */
377 if (lcnt < (emsecs * 1024U))
378 ipath_dev_err(dd,
379 "Performance problem: bandwidth to PIO buffers is "
380 "only %u MiB/sec\n",
381 lcnt / (u32) emsecs);
382 else
383 ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
384 lcnt / (u32) emsecs);
385
386 preempt_enable();
387
388 vfree(addr);
389
390done:
391
392 ipath_disarm_piobufs(dd, pbnum, 1);
393 ipath_enable_armlaunch(dd);
394}
395
396static int __devinit ipath_init_one(struct pci_dev *pdev,
397 const struct pci_device_id *ent)
398{
399 int ret, len, j;
400 struct ipath_devdata *dd;
401 unsigned long long addr;
402 u32 bar0 = 0, bar1 = 0;
403 u8 rev;
404
405 dd = ipath_alloc_devdata(pdev);
406 if (IS_ERR(dd)) {
407 ret = PTR_ERR(dd);
408 printk(KERN_ERR IPATH_DRV_NAME
409 ": Could not allocate devdata: error %d\n", -ret);
410 goto bail;
411 }
412
413 ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
414
415 ret = pci_enable_device(pdev);
416 if (ret) {
		/*
		 * pci_enable_device() failed.  This can happen if, for
		 * example, an earlier chip reset left the device in a state
		 * the PCI core cannot re-enable; all we can do is report it
		 * and skip this unit.
		 */
429 ipath_dev_err(dd, "enable unit %d failed: error %d\n",
430 dd->ipath_unit, -ret);
431 goto bail_devdata;
432 }
433 addr = pci_resource_start(pdev, 0);
434 len = pci_resource_len(pdev, 0);
435 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
436 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
437 ent->device, ent->driver_data);
438
439 read_bars(dd, pdev, &bar0, &bar1);
440
441 if (!bar1 && !(bar0 & ~0xf)) {
442 if (addr) {
443 dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
444 "rewriting as %llx\n", addr);
445 ret = pci_write_config_dword(
446 pdev, PCI_BASE_ADDRESS_0, addr);
447 if (ret) {
448 ipath_dev_err(dd, "rewrite of BAR0 "
449 "failed: err %d\n", -ret);
450 goto bail_disable;
451 }
452 ret = pci_write_config_dword(
453 pdev, PCI_BASE_ADDRESS_1, addr >> 32);
454 if (ret) {
455 ipath_dev_err(dd, "rewrite of BAR1 "
456 "failed: err %d\n", -ret);
457 goto bail_disable;
458 }
459 } else {
460 ipath_dev_err(dd, "BAR is 0 (probable RESET), "
461 "not usable until reboot\n");
462 ret = -ENODEV;
463 goto bail_disable;
464 }
465 }
466
467 ret = pci_request_regions(pdev, IPATH_DRV_NAME);
468 if (ret) {
469 dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
470 "err %d\n", dd->ipath_unit, -ret);
471 goto bail_disable;
472 }
473
474 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
475 if (ret) {
		/*
		 * The platform would not accept a 64-bit DMA mask; fall
		 * back to 32-bit addressing before giving up entirely.
		 */
481 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
482 if (ret) {
483 dev_info(&pdev->dev,
484 "Unable to set DMA mask for unit %u: %d\n",
485 dd->ipath_unit, ret);
486 goto bail_regions;
487 }
488 else {
489 ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
490 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
491 if (ret)
492 dev_info(&pdev->dev,
493 "Unable to set DMA consistent mask "
494 "for unit %u: %d\n",
495 dd->ipath_unit, ret);
496
497 }
498 }
499 else {
500 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
501 if (ret)
502 dev_info(&pdev->dev,
503 "Unable to set DMA consistent mask "
504 "for unit %u: %d\n",
505 dd->ipath_unit, ret);
506 }
507
508 pci_set_master(pdev);
509
	/*
	 * Remember the BAR values and PCI IDs; the BARs in particular may
	 * need to be rewritten after a chip reset clears them (see the
	 * BAR-is-zero handling above).
	 */
514 dd->ipath_pcibar0 = addr;
515 dd->ipath_pcibar1 = addr >> 32;
516 dd->ipath_deviceid = ent->device;
517 dd->ipath_vendorid = ent->vendor;
518
	/* Set up chip-specific function pointers based on the device ID. */
520 switch (ent->device) {
521 case PCI_DEVICE_ID_INFINIPATH_HT:
522#ifdef CONFIG_HT_IRQ
523 ipath_init_iba6110_funcs(dd);
524 break;
525#else
526 ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
527 "CONFIG_HT_IRQ is not enabled\n", ent->device);
528 return -ENODEV;
529#endif
530 case PCI_DEVICE_ID_INFINIPATH_PE800:
531#ifdef CONFIG_PCI_MSI
532 ipath_init_iba6120_funcs(dd);
533 break;
534#else
535 ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
536 "CONFIG_PCI_MSI is not enabled\n", ent->device);
537 return -ENODEV;
538#endif
539 case PCI_DEVICE_ID_INFINIPATH_7220:
540#ifndef CONFIG_PCI_MSI
541 ipath_dbg("CONFIG_PCI_MSI is not enabled, "
542 "using INTx for unit %u\n", dd->ipath_unit);
543#endif
544 ipath_init_iba7220_funcs(dd);
545 break;
546 default:
547 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
548 "failing\n", ent->device);
549 return -ENODEV;
550 }
551
552 for (j = 0; j < 6; j++) {
553 if (!pdev->resource[j].start)
554 continue;
555 ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
556 j, (unsigned long long)pdev->resource[j].start,
557 (unsigned long long)pdev->resource[j].end,
558 (unsigned long long)pci_resource_len(pdev, j));
559 }
560
561 if (!addr) {
562 ipath_dev_err(dd, "No valid address in BAR 0!\n");
563 ret = -ENODEV;
564 goto bail_regions;
565 }
566
567 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
568 if (ret) {
569 ipath_dev_err(dd, "Failed to read PCI revision ID unit "
570 "%u: err %d\n", dd->ipath_unit, -ret);
571 goto bail_regions;
572 }
573 dd->ipath_pcirev = rev;
574
575#if defined(__powerpc__)
576
577 dd->ipath_kregbase = __ioremap(addr, len,
578 (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
579#else
580 dd->ipath_kregbase = ioremap_nocache(addr, len);
581#endif
582
583 if (!dd->ipath_kregbase) {
584 ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
585 addr);
586 ret = -ENOMEM;
587 goto bail_iounmap;
588 }
589 dd->ipath_kregend = (u64 __iomem *)
590 ((void __iomem *)dd->ipath_kregbase + len);
591 dd->ipath_physaddr = addr;
592
593 ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
594 addr, dd->ipath_kregbase);
595
596 if (dd->ipath_f_bus(dd, pdev))
597 ipath_dev_err(dd, "Failed to setup config space; "
598 "continuing anyway\n");
599
	/*
	 * The chip-specific setup above is expected to have configured the
	 * interrupt (MSI, HT-IRQ or INTx) and recorded it in dd->ipath_irq;
	 * zero means no usable interrupt was assigned.
	 */
606 if (!dd->ipath_irq)
607 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
608 "work\n");
609 else {
610 ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
611 IPATH_DRV_NAME, dd);
612 if (ret) {
613 ipath_dev_err(dd, "Couldn't setup irq handler, "
614 "irq=%d: %d\n", dd->ipath_irq, ret);
615 goto bail_iounmap;
616 }
617 }
618
619 ret = ipath_init_chip(dd, 0);
620 if (ret)
621 goto bail_irqsetup;
622
623 ret = ipath_enable_wc(dd);
624
625 if (ret) {
626 ipath_dev_err(dd, "Write combining not enabled "
627 "(err %d): performance may be poor\n",
628 -ret);
629 ret = 0;
630 }
631
632 ipath_verify_pioperf(dd);
633
634 ipath_device_create_group(&pdev->dev, dd);
635 ipathfs_add_device(dd);
636 ipath_user_add(dd);
637 ipath_diag_add(dd);
638 ipath_register_ib_device(dd);
639
640 goto bail;
641
642bail_irqsetup:
643 if (pdev->irq)
644 free_irq(pdev->irq, dd);
645
646bail_iounmap:
647 iounmap((volatile void __iomem *) dd->ipath_kregbase);
648
649bail_regions:
650 pci_release_regions(pdev);
651
652bail_disable:
653 pci_disable_device(pdev);
654
655bail_devdata:
656 ipath_free_devdata(pdev, dd);
657
658bail:
659 return ret;
660}
661
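/*
 * Free everything the driver allocated for a device: shadow page/TID
 * arrays, DMA-coherent buffers, and the per-port data.  Called from
 * ipath_remove_one() after the device has been shut down.
 */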
662static void __devexit cleanup_device(struct ipath_devdata *dd)
663{
664 int port;
665 struct ipath_portdata **tmp;
666 unsigned long flags;
667
668 if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
669
670 *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
671 if (dd->ipath_kregbase) {
672
673
674
675
676
677 dd->ipath_kregbase = NULL;
678 dd->ipath_uregbase = 0;
679 dd->ipath_sregbase = 0;
680 dd->ipath_cregbase = 0;
681 dd->ipath_kregsize = 0;
682 }
683 ipath_disable_wc(dd);
684 }
685
686 if (dd->ipath_spectriggerhit)
687 dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
688 dd->ipath_spectriggerhit);
689
690 if (dd->ipath_pioavailregs_dma) {
691 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
692 (void *) dd->ipath_pioavailregs_dma,
693 dd->ipath_pioavailregs_phys);
694 dd->ipath_pioavailregs_dma = NULL;
695 }
696 if (dd->ipath_dummy_hdrq) {
697 dma_free_coherent(&dd->pcidev->dev,
698 dd->ipath_pd[0]->port_rcvhdrq_size,
699 dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
700 dd->ipath_dummy_hdrq = NULL;
701 }
702
703 if (dd->ipath_pageshadow) {
704 struct page **tmpp = dd->ipath_pageshadow;
705 dma_addr_t *tmpd = dd->ipath_physshadow;
706 int i, cnt = 0;
707
708 ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
709 "locked\n");
710 for (port = 0; port < dd->ipath_cfgports; port++) {
711 int port_tidbase = port * dd->ipath_rcvtidcnt;
712 int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
713 for (i = port_tidbase; i < maxtid; i++) {
714 if (!tmpp[i])
715 continue;
716 pci_unmap_page(dd->pcidev, tmpd[i],
717 PAGE_SIZE, PCI_DMA_FROMDEVICE);
718 ipath_release_user_pages(&tmpp[i], 1);
719 tmpp[i] = NULL;
720 cnt++;
721 }
722 }
723 if (cnt) {
724 ipath_stats.sps_pageunlocks += cnt;
725 ipath_cdbg(VERBOSE, "There were still %u expTID "
726 "entries locked\n", cnt);
727 }
728 if (ipath_stats.sps_pagelocks ||
729 ipath_stats.sps_pageunlocks)
730 ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
731 "unlocked via ipath_m{un}lock\n",
732 (unsigned long long)
733 ipath_stats.sps_pagelocks,
734 (unsigned long long)
735 ipath_stats.sps_pageunlocks);
736
737 ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
738 dd->ipath_pageshadow);
739 tmpp = dd->ipath_pageshadow;
740 dd->ipath_pageshadow = NULL;
741 vfree(tmpp);
742
743 dd->ipath_egrtidbase = NULL;
744 }
745
	/*
	 * Free the port data last.  Detach dd->ipath_pd under the uctxt
	 * lock so nothing can look up a port while it is being freed, then
	 * release each port's memory outside the lock.
	 */
753 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
754 tmp = dd->ipath_pd;
755 dd->ipath_pd = NULL;
756 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
757 for (port = 0; port < dd->ipath_portcnt; port++) {
758 struct ipath_portdata *pd = tmp[port];
759 tmp[port] = NULL;
760 ipath_free_pddata(dd, pd);
761 }
762 kfree(tmp);
763}
764
765static void __devexit ipath_remove_one(struct pci_dev *pdev)
766{
767 struct ipath_devdata *dd = pci_get_drvdata(pdev);
768
769 ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
770
	/*
	 * Quiesce the device (IB link, interrupts, receive and send logic)
	 * before unhooking it from the upper layers and freeing resources.
	 */
775 ipath_shutdown_device(dd);
776
777 flush_scheduled_work();
778
779 if (dd->verbs_dev)
780 ipath_unregister_ib_device(dd->verbs_dev);
781
782 ipath_diag_remove(dd);
783 ipath_user_remove(dd);
784 ipathfs_remove_device(dd);
785 ipath_device_remove_group(&pdev->dev, dd);
786
787 ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
788 "unit %u\n", dd, (u32) dd->ipath_unit);
789
790 cleanup_device(dd);
791
	/*
	 * Free the interrupt via the chip-specific hook, since it may be
	 * MSI, HT-IRQ or INTx depending on the hardware.
	 */
798 if (dd->ipath_irq) {
799 ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
800 dd->ipath_unit, dd->ipath_irq);
801 dd->ipath_f_free_irq(dd);
802 } else
803 ipath_dbg("irq is 0, not doing free_irq "
804 "for unit %u\n", dd->ipath_unit);
805
806
807
808
809
810
811 if (dd->ipath_f_cleanup)
812
813 dd->ipath_f_cleanup(dd);
814
815 ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
816 iounmap((volatile void __iomem *) dd->ipath_kregbase);
817 pci_release_regions(pdev);
818 ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
819 pci_disable_device(pdev);
820
821 ipath_free_devdata(pdev, dd);
822}
823
824
825DEFINE_MUTEX(ipath_mutex);
826
827static DEFINE_SPINLOCK(ipath_pioavail_lock);
828
/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Disarm each buffer in turn (one sendctrl write per buffer, under the
 * sendctrl lock), then force a PIO-available update so the buffers can
 * be reused.
 */
840void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
841 unsigned cnt)
842{
843 unsigned i, last = first + cnt;
844 unsigned long flags;
845
846 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
847 for (i = first; i < last; i++) {
848 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
849
850
851
852
853
854 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
855 dd->ipath_sendctrl | INFINIPATH_S_DISARM |
856 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
857
858 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
859 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
860 }
861
862 ipath_force_pio_avail_update(dd);
863}
864
/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the IPATH_LINK* state to wait for
 * @msecs: how long to wait, in milliseconds
 *
 * Returns 0 if the requested state was reached, otherwise -ETIMEDOUT
 * (after logging the current IBC state at debug level).
 */
877int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
878{
879 dd->ipath_state_wanted = state;
880 wait_event_interruptible_timeout(ipath_state_wait,
881 (dd->ipath_flags & state),
882 msecs_to_jiffies(msecs));
883 dd->ipath_state_wanted = 0;
884
885 if (!(dd->ipath_flags & state)) {
886 u64 val;
887 ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
888 " ms\n",
889
890 (state & IPATH_LINKINIT) ? "INIT" :
891 ((state & IPATH_LINKDOWN) ? "DOWN" :
892 ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
893 msecs);
894 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
895 ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
896 (unsigned long long) ipath_read_kreg64(
897 dd, dd->ipath_kregs->kr_ibcctrl),
898 (unsigned long long) val,
899 ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
900 }
901 return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
902}
903
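/*
 * Append names for any send-DMA error bits in err to buf.  The
 * "SDmaDisabled" bit is expected (and therefore not reported) while an
 * SDMA abort is already in progress.
 */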
904static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
905 char *buf, size_t blen)
906{
907 static const struct {
908 ipath_err_t err;
909 const char *msg;
910 } errs[] = {
911 { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
912 { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
913 { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
914 { INFINIPATH_E_SDMABASE, "SDmaBase" },
915 { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
916 { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
917 { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
918 { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
919 { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
920 { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
921 { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
922 { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
923 };
924 int i;
925 int expected;
926 size_t bidx = 0;
927
928 for (i = 0; i < ARRAY_SIZE(errs); i++) {
929 expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
930 test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
931 if ((err & errs[i].err) && !expected)
932 bidx += snprintf(buf + bidx, blen - bidx,
933 "%s ", errs[i].msg);
934 }
935}
936
/*
 * Decode the error bits in @err into a human-readable string in @buf
 * (of size @blen).  Returns 1 for "real" errors, 0 if only common
 * packet errors are present, so the caller can decide how loudly to
 * report them.
 */
943int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
944 ipath_err_t err)
945{
946 int iserr = 1;
947 *buf = '\0';
948 if (err & INFINIPATH_E_PKTERRS) {
949 if (!(err & ~INFINIPATH_E_PKTERRS))
950 iserr = 0;
951 if (ipath_debug & __IPATH_ERRPKTDBG) {
952 if (err & INFINIPATH_E_REBP)
953 strlcat(buf, "EBP ", blen);
954 if (err & INFINIPATH_E_RVCRC)
955 strlcat(buf, "VCRC ", blen);
956 if (err & INFINIPATH_E_RICRC) {
957 strlcat(buf, "CRC ", blen);
958
959 err &= INFINIPATH_E_RICRC;
960 }
961 if (err & INFINIPATH_E_RSHORTPKTLEN)
962 strlcat(buf, "rshortpktlen ", blen);
963 if (err & INFINIPATH_E_SDROPPEDDATAPKT)
964 strlcat(buf, "sdroppeddatapkt ", blen);
965 if (err & INFINIPATH_E_SPKTLEN)
966 strlcat(buf, "spktlen ", blen);
967 }
968 if ((err & INFINIPATH_E_RICRC) &&
969 !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
970 strlcat(buf, "CRC ", blen);
971 if (!iserr)
972 goto done;
973 }
974 if (err & INFINIPATH_E_RHDRLEN)
975 strlcat(buf, "rhdrlen ", blen);
976 if (err & INFINIPATH_E_RBADTID)
977 strlcat(buf, "rbadtid ", blen);
978 if (err & INFINIPATH_E_RBADVERSION)
979 strlcat(buf, "rbadversion ", blen);
980 if (err & INFINIPATH_E_RHDR)
981 strlcat(buf, "rhdr ", blen);
982 if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
983 strlcat(buf, "sendspecialtrigger ", blen);
984 if (err & INFINIPATH_E_RLONGPKTLEN)
985 strlcat(buf, "rlongpktlen ", blen);
986 if (err & INFINIPATH_E_RMAXPKTLEN)
987 strlcat(buf, "rmaxpktlen ", blen);
988 if (err & INFINIPATH_E_RMINPKTLEN)
989 strlcat(buf, "rminpktlen ", blen);
990 if (err & INFINIPATH_E_SMINPKTLEN)
991 strlcat(buf, "sminpktlen ", blen);
992 if (err & INFINIPATH_E_RFORMATERR)
993 strlcat(buf, "rformaterr ", blen);
994 if (err & INFINIPATH_E_RUNSUPVL)
995 strlcat(buf, "runsupvl ", blen);
996 if (err & INFINIPATH_E_RUNEXPCHAR)
997 strlcat(buf, "runexpchar ", blen);
998 if (err & INFINIPATH_E_RIBFLOW)
999 strlcat(buf, "ribflow ", blen);
1000 if (err & INFINIPATH_E_SUNDERRUN)
1001 strlcat(buf, "sunderrun ", blen);
1002 if (err & INFINIPATH_E_SPIOARMLAUNCH)
1003 strlcat(buf, "spioarmlaunch ", blen);
1004 if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
1005 strlcat(buf, "sunexperrpktnum ", blen);
1006 if (err & INFINIPATH_E_SDROPPEDSMPPKT)
1007 strlcat(buf, "sdroppedsmppkt ", blen);
1008 if (err & INFINIPATH_E_SMAXPKTLEN)
1009 strlcat(buf, "smaxpktlen ", blen);
1010 if (err & INFINIPATH_E_SUNSUPVL)
1011 strlcat(buf, "sunsupVL ", blen);
1012 if (err & INFINIPATH_E_INVALIDADDR)
1013 strlcat(buf, "invalidaddr ", blen);
1014 if (err & INFINIPATH_E_RRCVEGRFULL)
1015 strlcat(buf, "rcvegrfull ", blen);
1016 if (err & INFINIPATH_E_RRCVHDRFULL)
1017 strlcat(buf, "rcvhdrfull ", blen);
1018 if (err & INFINIPATH_E_IBSTATUSCHANGED)
1019 strlcat(buf, "ibcstatuschg ", blen);
1020 if (err & INFINIPATH_E_RIBLOSTLINK)
1021 strlcat(buf, "riblostlink ", blen);
1022 if (err & INFINIPATH_E_HARDWARE)
1023 strlcat(buf, "hardware ", blen);
1024 if (err & INFINIPATH_E_RESET)
1025 strlcat(buf, "reset ", blen);
1026 if (err & INFINIPATH_E_SDMAERRS)
1027 decode_sdma_errs(dd, err, buf, blen);
1028 if (err & INFINIPATH_E_INVALIDEEPCMD)
1029 strlcat(buf, "invalideepromcmd ", blen);
1030done:
1031 return iserr;
1032}
1033
/*
 * get_rhf_errstring - decode the receive-header-flag error bits
 * @err: the RHF error flags
 * @msg: output buffer
 * @len: size of the output buffer
 */
1042static void get_rhf_errstring(u32 err, char *msg, size_t len)
1043{
1044
1045 *msg = '\0';
1046
1047 if (err & INFINIPATH_RHF_H_ICRCERR)
1048 strlcat(msg, "icrcerr ", len);
1049 if (err & INFINIPATH_RHF_H_VCRCERR)
1050 strlcat(msg, "vcrcerr ", len);
1051 if (err & INFINIPATH_RHF_H_PARITYERR)
1052 strlcat(msg, "parityerr ", len);
1053 if (err & INFINIPATH_RHF_H_LENERR)
1054 strlcat(msg, "lenerr ", len);
1055 if (err & INFINIPATH_RHF_H_MTUERR)
1056 strlcat(msg, "mtuerr ", len);
1057 if (err & INFINIPATH_RHF_H_IHDRERR)
1058
1059 strlcat(msg, "ipathhdrerr ", len);
1060 if (err & INFINIPATH_RHF_H_TIDERR)
1061 strlcat(msg, "tiderr ", len);
1062 if (err & INFINIPATH_RHF_H_MKERR)
1063
1064 strlcat(msg, "invalid ipathhdr ", len);
1065 if (err & INFINIPATH_RHF_H_IBERR)
1066 strlcat(msg, "iberr ", len);
1067 if (err & INFINIPATH_RHF_L_SWA)
1068 strlcat(msg, "swA ", len);
1069 if (err & INFINIPATH_RHF_L_SWB)
1070 strlcat(msg, "swB ", len);
1071}
1072
1073
1074
1075
1076
1077
1078
1079
1080static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
1081{
1082 return dd->ipath_port0_skbinfo ?
1083 (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
1084}
1085
/**
 * ipath_alloc_skb - allocate an skb sized and aligned for a receive eager buffer
 * @dd: the infinipath device
 * @gfp_mask: allocation flags to use
 */
1091struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
1092 gfp_t gfp_mask)
1093{
1094 struct sk_buff *skb;
1095 u32 len;
1096
	/*
	 * The buffer has to hold the largest packet the chip can receive
	 * (ipath_ibmaxlen); 4 extra bytes are allocated to match the
	 * skb_reserve() done below.
	 */
1109 len = dd->ipath_ibmaxlen + 4;
1110
1111 if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/*
		 * Chips that use 4-byte TIDs need the buffer to start on a
		 * 2 KB boundary; over-allocate and align with skb_reserve()
		 * below.
		 */
1116 len += 2047;
1117 }
1118
1119 skb = __dev_alloc_skb(len, gfp_mask);
1120 if (!skb) {
1121 ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
1122 len);
1123 goto bail;
1124 }
1125
1126 skb_reserve(skb, 4);
1127
1128 if (dd->ipath_flags & IPATH_4BYTE_TID) {
1129 u32 una = (unsigned long)skb->data & 2047;
1130 if (una)
1131 skb_reserve(skb, 2048 - una);
1132 }
1133
1134bail:
1135 return skb;
1136}
1137
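/*
 * Log a packet whose receive-header flags indicate an error.  CRC
 * errors also feed the local-link-integrity error count once more than
 * the PhyErrThreshold (from ibcctrl) consecutive errors have been seen.
 */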
1138static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1139 u32 eflags,
1140 u32 l,
1141 u32 etail,
1142 __le32 *rhf_addr,
1143 struct ipath_message_header *hdr)
1144{
1145 char emsg[128];
1146
1147 get_rhf_errstring(eflags, emsg, sizeof emsg);
1148 ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
1149 "tlen=%x opcode=%x egridx=%x: %s\n",
1150 eflags, l,
1151 ipath_hdrget_rcv_type(rhf_addr),
1152 ipath_hdrget_length_in_bytes(rhf_addr),
1153 be32_to_cpu(hdr->bth[0]) >> 24,
1154 etail, emsg);
1155
1156
1157 if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
1158 u8 n = (dd->ipath_ibcctrl >>
1159 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1160 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1161
1162 if (++dd->ipath_lli_counter > n) {
1163 dd->ipath_lli_counter = 0;
1164 dd->ipath_lli_errors++;
1165 }
1166 }
1167}
1168
/**
 * ipath_kreceive - receive a packet from the receive header queue
 * @pd: the infinipath port
 *
 * Called from the interrupt handler to process everything currently in
 * the port's receive header queue.
 */
1175void ipath_kreceive(struct ipath_portdata *pd)
1176{
1177 struct ipath_devdata *dd = pd->port_dd;
1178 __le32 *rhf_addr;
1179 void *ebuf;
1180 const u32 rsize = dd->ipath_rcvhdrentsize;
1181 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;
1182 u32 etail = -1, l, hdrqtail;
1183 struct ipath_message_header *hdr;
1184 u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
1185 static u64 totcalls;
1186 int last;
1187
1188 l = pd->port_head;
1189 rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
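	/*
	 * Two ways of detecting new packets: chips without a DMA'd tail
	 * register (IPATH_NODMA_RTAIL) use a per-entry sequence number,
	 * otherwise we compare our head against the DMA'd tail (with a
	 * read barrier so header reads are not speculated past it).
	 */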
1190 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1191 u32 seq = ipath_hdrget_seq(rhf_addr);
1192
1193 if (seq != pd->port_seq_cnt)
1194 goto bail;
1195 hdrqtail = 0;
1196 } else {
1197 hdrqtail = ipath_get_rcvhdrtail(pd);
1198 if (l == hdrqtail)
1199 goto bail;
1200 smp_rmb();
1201 }
1202
1203reloop:
1204 for (last = 0, i = 1; !last; i += !last) {
1205 hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
1206 eflags = ipath_hdrget_err_flags(rhf_addr);
1207 etype = ipath_hdrget_rcv_type(rhf_addr);
1208
1209 tlen = ipath_hdrget_length_in_bytes(rhf_addr);
1210 ebuf = NULL;
1211 if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
1212 ipath_hdrget_use_egr_buf(rhf_addr) :
1213 (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
1214
1215
1216
1217
1218
1219
1220
1221 etail = ipath_hdrget_index(rhf_addr);
1222 updegr = 1;
1223 if (tlen > sizeof(*hdr) ||
1224 etype == RCVHQ_RCV_TYPE_NON_KD)
1225 ebuf = ipath_get_egrbuf(dd, etail);
1226 }
1227
1228
1229
1230
1231
1232
1233 if (etype != RCVHQ_RCV_TYPE_NON_KD &&
1234 etype != RCVHQ_RCV_TYPE_ERROR &&
1235 ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
1236 IPS_PROTO_VERSION)
1237 ipath_cdbg(PKT, "Bad InfiniPath protocol version "
1238 "%x\n", etype);
1239
1240 if (unlikely(eflags))
1241 ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
1242 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
1243 ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
1244 if (dd->ipath_lli_counter)
1245 dd->ipath_lli_counter--;
1246 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
1247 u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
1248 u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
1249 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
1250 "qp=%x), len %x; ignored\n",
1251 etype, opcode, qp, tlen);
1252 }
1253 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
1254 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
1255 be32_to_cpu(hdr->bth[0]) >> 24);
1256 else {
1257
1258
1259
1260
1261
1262
1263
1264 ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
1265 " %x, len %x hdrq+%x rhf: %Lx\n",
1266 etail, tlen, l, (unsigned long long)
1267 le64_to_cpu(*(__le64 *) rhf_addr));
1268 if (ipath_debug & __IPATH_ERRPKTDBG) {
1269 u32 j, *d, dw = rsize-2;
1270 if (rsize > (tlen>>2))
1271 dw = tlen>>2;
1272 d = (u32 *)hdr;
1273 printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
1274 dw);
1275 for (j = 0; j < dw; j++)
1276 printk(KERN_DEBUG "%8x%s", d[j],
1277 (j%8) == 7 ? "\n" : " ");
1278 printk(KERN_DEBUG ".\n");
1279 }
1280 }
1281 l += rsize;
1282 if (l >= maxcnt)
1283 l = 0;
1284 rhf_addr = (__le32 *) pd->port_rcvhdrq +
1285 l + dd->ipath_rhf_offset;
1286 if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
1287 u32 seq = ipath_hdrget_seq(rhf_addr);
1288
1289 if (++pd->port_seq_cnt > 13)
1290 pd->port_seq_cnt = 1;
1291 if (seq != pd->port_seq_cnt)
1292 last = 1;
1293 } else if (l == hdrqtail)
1294 last = 1;
1295
		/*
		 * Tell the chip about our new head pointer (and eager
		 * index) at most every 16 packets; only the final update
		 * also ORs in the interrupt-on-next-packet offset.
		 */
1300 if (last || !(i & 0xf)) {
1301 u64 lval = l;
1302
1303
1304 if (last)
1305 lval |= dd->ipath_rhdrhead_intr_off;
1306 ipath_write_ureg(dd, ur_rcvhdrhead, lval,
1307 pd->port_port);
1308 if (updegr) {
1309 ipath_write_ureg(dd, ur_rcvegrindexhead,
1310 etail, pd->port_port);
1311 updegr = 0;
1312 }
1313 }
1314 }
1315
1316 if (!dd->ipath_rhdrhead_intr_off && !reloop &&
1317 !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1318
1319
1320
1321
1322
1323
1324
1325
1326 u32 hqtail = ipath_get_rcvhdrtail(pd);
1327 if (hqtail != hdrqtail) {
1328 hdrqtail = hqtail;
1329 reloop = 1;
1330 goto reloop;
1331 }
1332 }
1333
1334 pkttot += i;
1335
1336 pd->port_head = l;
1337
1338 if (pkttot > ipath_stats.sps_maxpkts_call)
1339 ipath_stats.sps_maxpkts_call = pkttot;
1340 ipath_stats.sps_port0pkts += pkttot;
1341 ipath_stats.sps_avgpkts_call =
1342 ipath_stats.sps_port0pkts / ++totcalls;
1343
1344bail:;
1345}
1346
/*
 * Refresh our shadow (ipath_pioavailshadow) of the PIO-buffer-available
 * bitmaps from the chip's DMA'd copy, for buffers owned by the kernel.
 * Called when the shadow may be stale, e.g. after failing to find a
 * free buffer.
 */
1356static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1357{
1358 unsigned long flags;
1359 int i;
1360 const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379 if (!dd->ipath_pioavailregs_dma) {
1380 ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
1381 return;
1382 }
1383 if (ipath_debug & __IPATH_VERBDBG) {
1384
1385 volatile __le64 *dma = dd->ipath_pioavailregs_dma;
1386 unsigned long *shadow = dd->ipath_pioavailshadow;
1387
1388 ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
1389 "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
1390 "s3=%lx\n",
1391 (unsigned long long) le64_to_cpu(dma[0]),
1392 shadow[0],
1393 (unsigned long long) le64_to_cpu(dma[1]),
1394 shadow[1],
1395 (unsigned long long) le64_to_cpu(dma[2]),
1396 shadow[2],
1397 (unsigned long long) le64_to_cpu(dma[3]),
1398 shadow[3]);
1399 if (piobregs > 4)
1400 ipath_cdbg(
1401 PKT, "2nd group, dma4=%llx shad4=%lx, "
1402 "d5=%llx s5=%lx, d6=%llx s6=%lx, "
1403 "d7=%llx s7=%lx\n",
1404 (unsigned long long) le64_to_cpu(dma[4]),
1405 shadow[4],
1406 (unsigned long long) le64_to_cpu(dma[5]),
1407 shadow[5],
1408 (unsigned long long) le64_to_cpu(dma[6]),
1409 shadow[6],
1410 (unsigned long long) le64_to_cpu(dma[7]),
1411 shadow[7]);
1412 }
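	/*
	 * Fold newly-available buffers from the chip's DMA'd copy into our
	 * shadow, but only for buffers owned by the kernel and only where
	 * our shadow still shows the buffer busy.
	 */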
1413 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1414 for (i = 0; i < piobregs; i++) {
1415 u64 pchbusy, pchg, piov, pnew;
1416
1417
1418
1419 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
1420 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
1421 else
1422 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1423 pchg = dd->ipath_pioavailkernel[i] &
1424 ~(dd->ipath_pioavailshadow[i] ^ piov);
1425 pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
1426 if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
1427 pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
1428 pnew |= piov & pchbusy;
1429 dd->ipath_pioavailshadow[i] = pnew;
1430 }
1431 }
1432 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1433}
1434
1435
1436
1437
1438
1439
1440
1441static void ipath_reset_availshadow(struct ipath_devdata *dd)
1442{
1443 int i, im;
1444 unsigned long flags;
1445
1446 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1447 for (i = 0; i < dd->ipath_pioavregs; i++) {
1448 u64 val, oldval;
1449
1450 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1451 i ^ 1 : i;
1452 val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
1453
1454
1455
1456
1457 oldval = dd->ipath_pioavailshadow[i];
1458 dd->ipath_pioavailshadow[i] = val |
1459 ((~dd->ipath_pioavailkernel[i] <<
1460 INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
1461 0xaaaaaaaaaaaaaaaaULL);
1462 if (oldval != dd->ipath_pioavailshadow[i])
1463 ipath_dbg("shadow[%d] was %Lx, now %lx\n",
1464 i, (unsigned long long) oldval,
1465 dd->ipath_pioavailshadow[i]);
1466 }
1467 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1468}
1469
/**
 * ipath_setrcvhdrsize - set the receive protocol header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size in 32-bit words
 *
 * Fails with -EAGAIN if a different size has already been set, and with
 * -EOVERFLOW if the requested size does not fit in a queue entry.
 */
1477int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1478{
1479 int ret = 0;
1480
1481 if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
1482 if (dd->ipath_rcvhdrsize != rhdrsize) {
1483 dev_info(&dd->pcidev->dev,
1484 "Error: can't set protocol header "
1485 "size %u, already %u\n",
1486 rhdrsize, dd->ipath_rcvhdrsize);
1487 ret = -EAGAIN;
1488 } else
1489 ipath_cdbg(VERBOSE, "Reuse same protocol header "
1490 "size %u\n", dd->ipath_rcvhdrsize);
1491 } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
1492 (sizeof(u64) / sizeof(u32)))) {
1493 ipath_dbg("Error: can't set protocol header size %u "
1494 "(> max %u)\n", rhdrsize,
1495 dd->ipath_rcvhdrentsize -
1496 (u32) (sizeof(u64) / sizeof(u32)));
1497 ret = -EOVERFLOW;
1498 } else {
1499 dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
1500 dd->ipath_rcvhdrsize = rhdrsize;
1501 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
1502 dd->ipath_rcvhdrsize);
1503 ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
1504 dd->ipath_rcvhdrsize);
1505 }
1506 return ret;
1507}
1508
1509
1510
1511
1512static noinline void no_pio_bufs(struct ipath_devdata *dd)
1513{
1514 unsigned long *shadow = dd->ipath_pioavailshadow;
1515 __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
1516
1517 dd->ipath_upd_pio_shadow = 1;
1518
1519
1520
1521
1522 ipath_stats.sps_nopiobufs++;
1523 if (!(++dd->ipath_consec_nopiobuf % 100000)) {
1524 ipath_force_pio_avail_update(dd);
1525 ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
1526 "%llx %llx %llx %llx\n"
1527 "ipath shadow: %lx %lx %lx %lx\n",
1528 dd->ipath_consec_nopiobuf,
1529 (unsigned long)get_cycles(),
1530 (unsigned long long) le64_to_cpu(dma[0]),
1531 (unsigned long long) le64_to_cpu(dma[1]),
1532 (unsigned long long) le64_to_cpu(dma[2]),
1533 (unsigned long long) le64_to_cpu(dma[3]),
1534 shadow[0], shadow[1], shadow[2], shadow[3]);
1535
1536
1537
1538
1539 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
1540 (sizeof(shadow[0]) * 4 * 4))
1541 ipath_dbg("2nd group: dmacopy: "
1542 "%llx %llx %llx %llx\n"
1543 "ipath shadow: %lx %lx %lx %lx\n",
1544 (unsigned long long)le64_to_cpu(dma[4]),
1545 (unsigned long long)le64_to_cpu(dma[5]),
1546 (unsigned long long)le64_to_cpu(dma[6]),
1547 (unsigned long long)le64_to_cpu(dma[7]),
1548 shadow[4], shadow[5], shadow[6], shadow[7]);
1549
1550
1551 ipath_reset_availshadow(dd);
1552 }
1553}
1554
1555
1556
1557
1558
1559
1560
1561
1562static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
1563 u32 *pbufnum, u32 first, u32 last, u32 firsti)
1564{
1565 int i, j, updated = 0;
1566 unsigned piobcnt;
1567 unsigned long flags;
1568 unsigned long *shadow = dd->ipath_pioavailshadow;
1569 u32 __iomem *buf;
1570
1571 piobcnt = last - first;
1572 if (dd->ipath_upd_pio_shadow) {
1573
1574
1575
1576
1577
1578 ipath_update_pio_bufs(dd);
1579 updated++;
1580 i = first;
1581 } else
1582 i = firsti;
1583rescan:
1584
1585
1586
1587
1588
1589 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1590 for (j = 0; j < piobcnt; j++, i++) {
1591 if (i >= last)
1592 i = first;
1593 if (__test_and_set_bit((2 * i) + 1, shadow))
1594 continue;
1595
1596 __change_bit(2 * i, shadow);
1597 break;
1598 }
1599 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1600
1601 if (j == piobcnt) {
1602 if (!updated) {
1603
1604
1605
1606
1607 ipath_update_pio_bufs(dd);
1608 updated++;
1609 i = first;
1610 goto rescan;
1611 } else if (updated == 1 && piobcnt <=
1612 ((dd->ipath_sendctrl
1613 >> INFINIPATH_S_UPDTHRESH_SHIFT) &
1614 INFINIPATH_S_UPDTHRESH_MASK)) {
1615
1616
1617
1618
1619
1620
1621 ipath_force_pio_avail_update(dd);
1622 ipath_update_pio_bufs(dd);
1623 updated++;
1624 i = first;
1625 goto rescan;
1626 }
1627
1628 no_pio_bufs(dd);
1629 buf = NULL;
1630 } else {
1631 if (i < dd->ipath_piobcnt2k)
1632 buf = (u32 __iomem *) (dd->ipath_pio2kbase +
1633 i * dd->ipath_palign);
1634 else
1635 buf = (u32 __iomem *)
1636 (dd->ipath_pio4kbase +
1637 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
1638 if (pbufnum)
1639 *pbufnum = i;
1640 }
1641
1642 return buf;
1643}
1644
/**
 * ipath_getpiobuf - find an available PIO send buffer
 * @dd: the infinipath device
 * @plen: the packet length in 32-bit words, used to pick 2K vs 4K buffers
 * @pbufnum: the chosen buffer number is returned here
 *
 * Returns a mapped pointer to the buffer, or NULL if none are available.
 */
1651u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
1652{
1653 u32 __iomem *buf;
1654 u32 pnum, nbufs;
1655 u32 first, lasti;
1656
1657 if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
1658 first = dd->ipath_piobcnt2k;
1659 lasti = dd->ipath_lastpioindexl;
1660 } else {
1661 first = 0;
1662 lasti = dd->ipath_lastpioindex;
1663 }
1664 nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
1665 buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
1666
1667 if (buf) {
1668
1669
1670
1671
1672 if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
1673 dd->ipath_lastpioindexl = pnum + 1;
1674 else
1675 dd->ipath_lastpioindex = pnum + 1;
1676 if (dd->ipath_upd_pio_shadow)
1677 dd->ipath_upd_pio_shadow = 0;
1678 if (dd->ipath_consec_nopiobuf)
1679 dd->ipath_consec_nopiobuf = 0;
1680 ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
1681 pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
1682 if (pbufnum)
1683 *pbufnum = pnum;
1684
1685 }
1686 return buf;
1687}
1688
/**
 * ipath_chg_pioavailkernel - change which send buffers the kernel may use
 * @dd: the infinipath device
 * @start: the first send buffer to change
 * @len: the number of send buffers
 * @avail: nonzero to make the buffers available to the kernel, zero to
 *	reserve them for other use (e.g. user processes)
 */
1696void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
1697 unsigned len, int avail)
1698{
1699 unsigned long flags;
1700 unsigned end, cnt = 0, next;
1701
1702
1703 start *= 2;
1704 end = start + len * 2;
1705
1706 spin_lock_irqsave(&ipath_pioavail_lock, flags);
1707
1708 while (start < end) {
1709 if (avail) {
1710 unsigned long dma;
1711 int i, im;
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726 i = start / BITS_PER_LONG;
1727 im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
1728 i ^ 1 : i;
1729 __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
1730 + start, dd->ipath_pioavailshadow);
1731 dma = (unsigned long) le64_to_cpu(
1732 dd->ipath_pioavailregs_dma[im]);
1733 if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1734 + start) % BITS_PER_LONG, &dma))
1735 __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1736 + start, dd->ipath_pioavailshadow);
1737 else
1738 __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
1739 + start, dd->ipath_pioavailshadow);
1740 __set_bit(start, dd->ipath_pioavailkernel);
1741 } else {
1742 __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
1743 dd->ipath_pioavailshadow);
1744 __clear_bit(start, dd->ipath_pioavailkernel);
1745 }
1746 start += 2;
1747 }
1748
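	/*
	 * Count how many buffers are still available for kernel use, so
	 * the PIO-available update threshold can be adjusted below if it
	 * has become too large.
	 */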
1749 if (dd->ipath_pioupd_thresh) {
1750 end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1751 next = find_first_bit(dd->ipath_pioavailkernel, end);
1752 while (next < end) {
1753 cnt++;
1754 next = find_next_bit(dd->ipath_pioavailkernel, end,
1755 next + 1);
1756 }
1757 }
1758 spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771 if (!avail && len < cnt)
1772 cnt = len;
1773 if (cnt < dd->ipath_pioupd_thresh) {
1774 dd->ipath_pioupd_thresh = cnt;
1775 ipath_dbg("Decreased pio update threshold to %u\n",
1776 dd->ipath_pioupd_thresh);
1777 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1778 dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
1779 << INFINIPATH_S_UPDTHRESH_SHIFT);
1780 dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
1781 << INFINIPATH_S_UPDTHRESH_SHIFT;
1782 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1783 dd->ipath_sendctrl);
1784 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1785 }
1786}
1787
/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * Allocate (or reuse) the DMA-coherent receive header queue for a port,
 * plus the DMA'd tail-update page where the chip supports it, clear
 * them, and point the chip at them.
 */
1797int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1798 struct ipath_portdata *pd)
1799{
1800 int ret = 0;
1801
1802 if (!pd->port_rcvhdrq) {
1803 dma_addr_t phys_hdrqtail;
1804 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
1805 int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1806 sizeof(u32), PAGE_SIZE);
1807
1808 pd->port_rcvhdrq = dma_alloc_coherent(
1809 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1810 gfp_flags);
1811
1812 if (!pd->port_rcvhdrq) {
1813 ipath_dev_err(dd, "attempt to allocate %d bytes "
1814 "for port %u rcvhdrq failed\n",
1815 amt, pd->port_port);
1816 ret = -ENOMEM;
1817 goto bail;
1818 }
1819
1820 if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
1821 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
1822 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1823 GFP_KERNEL);
1824 if (!pd->port_rcvhdrtail_kvaddr) {
1825 ipath_dev_err(dd, "attempt to allocate 1 page "
1826 "for port %u rcvhdrqtailaddr "
1827 "failed\n", pd->port_port);
1828 ret = -ENOMEM;
1829 dma_free_coherent(&dd->pcidev->dev, amt,
1830 pd->port_rcvhdrq,
1831 pd->port_rcvhdrq_phys);
1832 pd->port_rcvhdrq = NULL;
1833 goto bail;
1834 }
1835 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
1836 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
1837 "physical\n", pd->port_port,
1838 (unsigned long long) phys_hdrqtail);
1839 }
1840
1841 pd->port_rcvhdrq_size = amt;
1842
1843 ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
1844 "for port %u rcvhdr Q\n",
1845 amt >> PAGE_SHIFT, pd->port_rcvhdrq,
1846 (unsigned long) pd->port_rcvhdrq_phys,
1847 (unsigned long) pd->port_rcvhdrq_size,
1848 pd->port_port);
1849 }
1850 else
1851 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
1852 "hdrtailaddr@%p %llx physical\n",
1853 pd->port_port, pd->port_rcvhdrq,
1854 (unsigned long long) pd->port_rcvhdrq_phys,
1855 pd->port_rcvhdrtail_kvaddr, (unsigned long long)
1856 pd->port_rcvhdrqtailaddr_phys);
1857
1858
1859 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
1860 if (pd->port_rcvhdrtail_kvaddr)
1861 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
1862
1863
1864
1865
1866
1867 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1868 pd->port_port, pd->port_rcvhdrqtailaddr_phys);
1869 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1870 pd->port_port, pd->port_rcvhdrq_phys);
1871
1872bail:
1873 return ret;
1874}
1875
/**
 * ipath_cancel_sends - cancel all outstanding sends
 * @dd: the infinipath device
 * @restore_sendctrl: nonzero to re-enable PIO sends when done
 *
 * Abort whatever is in the send buffers, disarm them all, and (on chips
 * with send DMA) kick the SDMA abort handling.  Skipped while IB
 * autonegotiation is in progress.
 */
1886void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
1887{
1888 unsigned long flags;
1889
1890 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
1891 ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
1892 goto bail;
1893 }
1894
1895
1896
1897
1898
1899
1900 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
1901 int skip_cancel;
1902 unsigned long *statp = &dd->ipath_sdma_status;
1903
1904 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1905 skip_cancel =
1906 test_and_set_bit(IPATH_SDMA_ABORTING, statp)
1907 && !test_bit(IPATH_SDMA_DISABLED, statp);
1908 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1909 if (skip_cancel)
1910 goto bail;
1911 }
1912
1913 ipath_dbg("Cancelling all in-progress send buffers\n");
1914
1915
1916 dd->ipath_lastcancel = jiffies + HZ / 2;
	/*
	 * Turn off the PIO-available updates and PIO sends while the abort
	 * is written, so nothing new gets launched; the bits are restored
	 * below if requested.
	 */
1926 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1927 dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
1928 | INFINIPATH_S_PIOENABLE);
1929 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1930 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
1931 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1932 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1933
1934
1935 ipath_disarm_piobufs(dd, 0,
1936 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
1937
1938 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
1939 set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
1940
1941 if (restore_sendctrl) {
1942
1943 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1944 dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
1945 INFINIPATH_S_PIOENABLE;
1946 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1947 dd->ipath_sendctrl);
1948
1949 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1950 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1951 }
1952
1953 if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
1954 !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
1955 test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
1956 spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
1957
1958 dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
1959 dd->ipath_sdma_reset_wait = 200;
1960 if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
1961 tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
1962 spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
1963 }
1964bail:;
1965}
1966
/*
 * Force a fresh DMA of the PIO-buffer-available registers by briefly
 * clearing and then restoring the PIOBUFAVAILUPD bit in sendctrl.
 */
1975void ipath_force_pio_avail_update(struct ipath_devdata *dd)
1976{
1977 unsigned long flags;
1978
1979 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1980 if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
1981 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1982 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
1983 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1984 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1985 dd->ipath_sendctrl);
1986 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1987 }
1988 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1989}
1990
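/*
 * Send a link command (and optional link-init command) to the IB
 * link-control logic, keeping the IPATH_IB_LINK_DISABLED flag in step
 * so the rest of the driver knows the link was taken down on purpose.
 */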
1991static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
1992 int linitcmd)
1993{
1994 u64 mod_wd;
1995 static const char *what[4] = {
1996 [0] = "NOP",
1997 [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
1998 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1999 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
2000 };
2001
2002 if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
2003
2004
2005
2006
2007 preempt_disable();
2008 dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
2009 preempt_enable();
2010 } else if (linitcmd) {
2011
2012
2013
2014
2015
2016 preempt_disable();
2017 dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
2018 preempt_enable();
2019 }
2020
2021 mod_wd = (linkcmd << dd->ibcc_lc_shift) |
2022 (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
2023 ipath_cdbg(VERBOSE,
2024 "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
2025 dd->ipath_unit, what[linkcmd], linitcmd,
2026 ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
2027 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
2028
2029 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2030 dd->ipath_ibcctrl | mod_wd);
2031
2032 (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2033}
2034
2035int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
2036{
2037 u32 lstate;
2038 int ret;
2039
2040 switch (newstate) {
2041 case IPATH_IB_LINKDOWN_ONLY:
2042 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
2043
2044 ret = 0;
2045 goto bail;
2046
2047 case IPATH_IB_LINKDOWN:
2048 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2049 INFINIPATH_IBCC_LINKINITCMD_POLL);
2050
2051 ret = 0;
2052 goto bail;
2053
2054 case IPATH_IB_LINKDOWN_SLEEP:
2055 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2056 INFINIPATH_IBCC_LINKINITCMD_SLEEP);
2057
2058 ret = 0;
2059 goto bail;
2060
2061 case IPATH_IB_LINKDOWN_DISABLE:
2062 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
2063 INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2064
2065 ret = 0;
2066 goto bail;
2067
2068 case IPATH_IB_LINKARM:
2069 if (dd->ipath_flags & IPATH_LINKARMED) {
2070 ret = 0;
2071 goto bail;
2072 }
2073 if (!(dd->ipath_flags &
2074 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
2075 ret = -EINVAL;
2076 goto bail;
2077 }
2078 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
2079
2080
2081
2082
2083
2084 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
2085 break;
2086
2087 case IPATH_IB_LINKACTIVE:
2088 if (dd->ipath_flags & IPATH_LINKACTIVE) {
2089 ret = 0;
2090 goto bail;
2091 }
2092 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
2093 ret = -EINVAL;
2094 goto bail;
2095 }
2096 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
2097 lstate = IPATH_LINKACTIVE;
2098 break;
2099
2100 case IPATH_IB_LINK_LOOPBACK:
2101 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
2102 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
2103 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2104 dd->ipath_ibcctrl);
2105
2106
2107 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2108 IPATH_IB_HRTBT_OFF);
2109
2110 ret = 0;
2111 goto bail;
2112
2113 case IPATH_IB_LINK_EXTERNAL:
2114 dev_info(&dd->pcidev->dev,
2115 "Disabling IB local loopback (normal)\n");
2116 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2117 IPATH_IB_HRTBT_ON);
2118 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
2119 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2120 dd->ipath_ibcctrl);
2121
2122 ret = 0;
2123 goto bail;
2124
2125
2126
2127
2128
2129
2130
2131 case IPATH_IB_LINK_HRTBT:
2132 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2133 IPATH_IB_HRTBT_ON);
2134 goto bail;
2135
2136 case IPATH_IB_LINK_NO_HRTBT:
2137 ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
2138 IPATH_IB_HRTBT_OFF);
2139 goto bail;
2140
2141 default:
2142 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
2143 ret = -EINVAL;
2144 goto bail;
2145 }
2146 ret = ipath_wait_linkstate(dd, lstate, 2000);
2147
2148bail:
2149 return ret;
2150}
2151
/**
 * ipath_set_mtu - set the IB MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * Accepts 256/512/1024/2048, plus 4096 when mtu4096 is enabled; adjusts
 * ipath_ibmaxlen and reprograms the IBC max-packet-length field when
 * the maximum packet size changes.
 */
2164int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
2165{
2166 u32 piosize;
2167 int changed = 0;
2168 int ret;
2169
2170
2171
2172
2173
2174
2175
2176 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
2177 (arg != 4096 || !ipath_mtu4096)) {
2178 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
2179 ret = -EINVAL;
2180 goto bail;
2181 }
2182 if (dd->ipath_ibmtu == arg) {
2183 ret = 0;
2184 goto bail;
2185 }
2186
2187 piosize = dd->ipath_ibmaxlen;
2188 dd->ipath_ibmtu = arg;
2189
2190 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
2191
2192 if (piosize != dd->ipath_init_ibmaxlen) {
2193 if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
2194 piosize = dd->ipath_init_ibmaxlen;
2195 dd->ipath_ibmaxlen = piosize;
2196 changed = 1;
2197 }
2198 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
2199 piosize = arg + IPATH_PIO_MAXIBHDR;
2200 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
2201 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
2202 arg);
2203 dd->ipath_ibmaxlen = piosize;
2204 changed = 1;
2205 }
2206
2207 if (changed) {
2208 u64 ibc = dd->ipath_ibcctrl, ibdw;
2209
2210
2211
2212
2213
2214 dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
2215 ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
2216 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
2217 dd->ibcc_mpl_shift);
2218 ibc |= ibdw << dd->ibcc_mpl_shift;
2219 dd->ipath_ibcctrl = ibc;
2220 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
2221 dd->ipath_ibcctrl);
2222 dd->ipath_f_tidtemplate(dd);
2223 }
2224
2225 ret = 0;
2226
2227bail:
2228 return ret;
2229}
2230
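/*
 * Record the LID and LMC assigned by the subnet manager, and hand the
 * LID plus an LMC-derived mask to the chip-specific code.
 */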
2231int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
2232{
2233 dd->ipath_lid = lid;
2234 dd->ipath_lmc = lmc;
2235
2236 dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
2237 (~((1U << lmc) - 1)) << 16);
2238
2239 dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
2240
2241 return 0;
2242}
2243
/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Used for the per-port rcvhdr address registers, whose register
 * numbers are consecutive per port.
 */
2255void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
2256 unsigned port, u64 value)
2257{
2258 u16 where;
2259
2260 if (port < dd->ipath_portcnt &&
2261 (regno == dd->ipath_kregs->kr_rcvhdraddr ||
2262 regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
2263 where = regno + port;
2264 else
2265 where = -1;
2266
2267 ipath_write_kreg(dd, where, value);
2268}
2269
2270
2271
2272
2273
2274
2275
2276
2277#define LED_OVER_FREQ_SHIFT 8
2278#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
2279
2280#define LED_OVER_BOTH_OFF (8)
2281
2282static void ipath_run_led_override(unsigned long opaque)
2283{
2284 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2285 int timeoff;
2286 int pidx;
2287 u64 lstate, ltstate, val;
2288
2289 if (!(dd->ipath_flags & IPATH_INITTED))
2290 return;
2291
2292 pidx = dd->ipath_led_override_phase++ & 1;
2293 dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
2294 timeoff = dd->ipath_led_override_timeoff;
2295
2296
2297
2298
2299
2300
2301 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
2302 ltstate = ipath_ib_linktrstate(dd, val);
2303 lstate = ipath_ib_linkstate(dd, val);
2304
2305 dd->ipath_f_setextled(dd, lstate, ltstate);
2306 mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
2307}
2308
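/*
 * ipath_set_led_override - set up a user-requested LED blink pattern
 * @dd: the infinipath device
 * @val: low two nibbles are the two LED patterns to alternate between;
 *	the byte above LED_OVER_FREQ_SHIFT selects the blink frequency
 *	(zero means a steady pattern)
 *
 * Starts the override timer the first time it is used.
 */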
2309void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
2310{
2311 int timeoff, freq;
2312
2313 if (!(dd->ipath_flags & IPATH_INITTED))
2314 return;
2315
2316
2317 timeoff = HZ;
2318 freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
2319
2320 if (freq) {
2321
2322 dd->ipath_led_override_vals[0] = val & 0xF;
2323 dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
2324 timeoff = (HZ << 4)/freq;
2325 } else {
2326
2327 dd->ipath_led_override_vals[0] = val & 0xF;
2328 dd->ipath_led_override_vals[1] = val & 0xF;
2329 }
2330 dd->ipath_led_override_timeoff = timeoff;
2331
2332
2333
2334
2335
2336 if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
2337
2338 init_timer(&dd->ipath_led_override_timer);
2339 dd->ipath_led_override_timer.function =
2340 ipath_run_led_override;
2341 dd->ipath_led_override_timer.data = (unsigned long) dd;
2342 dd->ipath_led_override_timer.expires = jiffies + 1;
2343 add_timer(&dd->ipath_led_override_timer);
2344 } else
2345 atomic_dec(&dd->ipath_led_override_timer_active);
2346}
2347
/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * Take the IB link down, disable interrupts, receives and sends, and
 * stop the driver's timers, so the chip is quiescent before the rest of
 * teardown (or unload) proceeds.  Does not free any data structures.
 */
2357void ipath_shutdown_device(struct ipath_devdata *dd)
2358{
2359 unsigned long flags;
2360
2361 ipath_dbg("Shutting down the device\n");
2362
2363 ipath_hol_up(dd);
2364
2365 dd->ipath_flags |= IPATH_LINKUNK;
2366 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
2367 IPATH_LINKINIT | IPATH_LINKARMED |
2368 IPATH_LINKACTIVE);
2369 *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
2370 IPATH_STATUS_IB_READY);
2371
2372
2373 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2374
2375 dd->ipath_rcvctrl = 0;
2376 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
2377 dd->ipath_rcvctrl);
2378
2379 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2380 teardown_sdma(dd);
2381
2382
2383
2384
2385
2386 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
2387 dd->ipath_sendctrl = 0;
2388 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	/* read scratch to flush the write */
2390 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2391 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
2392
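	/* brief delay to let anything already in the send pipe drain */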
2397 udelay(5);
2398
2399 dd->ipath_f_setextled(dd, 0, 0);
2400
2401 ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
2402 ipath_cancel_sends(dd, 0);
2403
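	/* tell the IB layer the port has failed, since we are shutting down */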
2409 signal_ib_event(dd, IB_EVENT_PORT_ERR);
2410
2411
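	/* disable the IBC link and put the chip into freeze mode */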
2412 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
2413 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2414 dd->ipath_control | INFINIPATH_C_FREEZEMODE);
2415
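	/* quiesce the serdes via the chip-specific routine */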
2421 dd->ipath_f_quiet_serdes(dd);
2422
2423
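	/* stop any of the driver's timers that may still be running */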
2424 del_timer_sync(&dd->ipath_hol_timer);
2425 if (dd->ipath_stats_timer_active) {
2426 del_timer_sync(&dd->ipath_stats_timer);
2427 dd->ipath_stats_timer_active = 0;
2428 }
2429 if (dd->ipath_intrchk_timer.data) {
2430 del_timer_sync(&dd->ipath_intrchk_timer);
2431 dd->ipath_intrchk_timer.data = 0;
2432 }
2433 if (atomic_read(&dd->ipath_led_override_timer_active)) {
2434 del_timer_sync(&dd->ipath_led_override_timer);
2435 atomic_set(&dd->ipath_led_override_timer_active, 0);
2436 }
2437
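	/*
	 * Clear any latched hardware errors (except memory BIST failure),
	 * chip errors and interrupts, so stale state is not seen the next
	 * time the driver is loaded or the device is re-enabled.
	 */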
2443 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
2444 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
2445 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
2446 ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
2447
2448 ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
2449 ipath_update_eeprom_log(dd);
2450}
2451
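/*
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the port data structure (may be NULL)
 *
 * Frees the receive header queue and its tail page, the eager receive
 * buffers (or, for port 0, the skb list), any subport buffers, and
 * finally the portdata structure itself.
 */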
2464void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
2465{
2466 if (!pd)
2467 return;
2468
2469 if (pd->port_rcvhdrq) {
2470 ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
2471 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
2472 (unsigned long) pd->port_rcvhdrq_size);
2473 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
2474 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
2475 pd->port_rcvhdrq = NULL;
2476 if (pd->port_rcvhdrtail_kvaddr) {
2477 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
2478 pd->port_rcvhdrtail_kvaddr,
2479 pd->port_rcvhdrqtailaddr_phys);
2480 pd->port_rcvhdrtail_kvaddr = NULL;
2481 }
2482 }
2483 if (pd->port_port && pd->port_rcvegrbuf) {
2484 unsigned e;
2485
2486 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
2487 void *base = pd->port_rcvegrbuf[e];
2488 size_t size = pd->port_rcvegrbuf_size;
2489
2490 ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
2491 "chunk %u/%u\n", base,
2492 (unsigned long) size,
2493 e, pd->port_rcvegrbuf_chunks);
2494 dma_free_coherent(&dd->pcidev->dev, size,
2495 base, pd->port_rcvegrbuf_phys[e]);
2496 }
2497 kfree(pd->port_rcvegrbuf);
2498 pd->port_rcvegrbuf = NULL;
2499 kfree(pd->port_rcvegrbuf_phys);
2500 pd->port_rcvegrbuf_phys = NULL;
2501 pd->port_rcvegrbuf_chunks = 0;
2502 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
2503 unsigned e;
2504 struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
2505
2506 dd->ipath_port0_skbinfo = NULL;
2507 ipath_cdbg(VERBOSE, "free closed port %d "
2508 "ipath_port0_skbinfo @ %p\n", pd->port_port,
2509 skbinfo);
2510 for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
2511 if (skbinfo[e].skb) {
2512 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
2513 dd->ipath_ibmaxlen,
2514 PCI_DMA_FROMDEVICE);
2515 dev_kfree_skb(skbinfo[e].skb);
2516 }
2517 vfree(skbinfo);
2518 }
2519 kfree(pd->port_tid_pg_list);
2520 vfree(pd->subport_uregbase);
2521 vfree(pd->subport_rcvegrbuf);
2522 vfree(pd->subport_rcvhdr_base);
2523 kfree(pd);
2524}
2525
2526static int __init infinipath_init(void)
2527{
2528 int ret;
2529
2530 if (ipath_debug & __IPATH_DBG)
2531 printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
2532
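	/*
	 * The unit table must be set up before the PCI driver is
	 * registered and devices can be probed.
	 */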
2537 idr_init(&unit_table);
2538 if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
2539 printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
2540 ret = -ENOMEM;
2541 goto bail;
2542 }
2543
2544 ret = pci_register_driver(&ipath_driver);
2545 if (ret < 0) {
2546 printk(KERN_ERR IPATH_DRV_NAME
2547 ": Unable to register driver: error %d\n", -ret);
2548 goto bail_unit;
2549 }
2550
2551 ret = ipath_init_ipathfs();
2552 if (ret < 0) {
2553 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
2554 "ipathfs: error %d\n", -ret);
2555 goto bail_pci;
2556 }
2557
2558 goto bail;
2559
2560bail_pci:
2561 pci_unregister_driver(&ipath_driver);
2562
2563bail_unit:
2564 idr_destroy(&unit_table);
2565
2566bail:
2567 return ret;
2568}
2569
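/* module unload: undo infinipath_init() in the reverse order */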
2570static void __exit infinipath_cleanup(void)
2571{
2572 ipath_exit_ipathfs();
2573
2574 ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
2575 pci_unregister_driver(&ipath_driver);
2576
2577 idr_destroy(&unit_table);
2578}
2579
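/*
 * ipath_reset_device - reset the chip for the given unit, if possible
 * @unit: the unit to reset
 *
 * Fails if the unit is not present or any of its user ports are still
 * open.  Otherwise the chip-specific reset routine is called and, if it
 * succeeds, the chip is re-initialized with ipath_init_chip().
 */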
2589int ipath_reset_device(int unit)
2590{
2591 int ret, i;
2592 struct ipath_devdata *dd = ipath_lookup(unit);
2593 unsigned long flags;
2594
2595 if (!dd) {
2596 ret = -ENODEV;
2597 goto bail;
2598 }
2599
2600 if (atomic_read(&dd->ipath_led_override_timer_active)) {
		/* stop the LED override timer before touching the LEDs */
2602 del_timer_sync(&dd->ipath_led_override_timer);
2603 atomic_set(&dd->ipath_led_override_timer_active, 0);
2604 }
2605
2606
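	/* then make sure both LEDs are off */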
2607 dd->ipath_led_override = LED_OVER_BOTH_OFF;
2608 dd->ipath_f_setextled(dd, 0, 0);
2609
2610 dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
2611
2612 if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
2613 dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
2614 "not initialized or not present\n", unit);
2615 ret = -ENXIO;
2616 goto bail;
2617 }
2618
2619 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2620 if (dd->ipath_pd)
2621 for (i = 1; i < dd->ipath_cfgports; i++) {
2622 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
2623 continue;
2624 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2625 ipath_dbg("unit %u port %d is in use "
2626 "(PID %u cmd %s), can't reset\n",
2627 unit, i,
2628 pid_nr(dd->ipath_pd[i]->port_pid),
2629 dd->ipath_pd[i]->port_comm);
2630 ret = -EBUSY;
2631 goto bail;
2632 }
2633 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2634
2635 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2636 teardown_sdma(dd);
2637
2638 dd->ipath_flags &= ~IPATH_INITTED;
2639 ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
2640 ret = dd->ipath_f_reset(dd);
2641 if (ret == 1) {
2642 ipath_dbg("Reinitializing unit %u after reset attempt\n",
2643 unit);
2644 ret = ipath_init_chip(dd, 1);
2645 } else
2646 ret = -EAGAIN;
2647 if (ret)
2648 ipath_dev_err(dd, "Reinitialize unit %u after "
2649 "reset failed with %d\n", unit, ret);
2650 else
2651 dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
2652 "resetting\n", unit);
2653
2654bail:
2655 return ret;
2656}
2657
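/*
 * Send the given signal to every process that currently has a user
 * port (or subport) open on this device; returns the number of
 * processes signalled.
 */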
2663static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
2664{
2665 int i, sub, any = 0;
2666 struct pid *pid;
2667 unsigned long flags;
2668
2669 if (!dd->ipath_pd)
2670 return 0;
2671
2672 spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
2673 for (i = 1; i < dd->ipath_cfgports; i++) {
2674 if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
2675 continue;
2676 pid = dd->ipath_pd[i]->port_pid;
2677 if (!pid)
2678 continue;
2679
2680 dev_info(&dd->pcidev->dev, "context %d in use "
2681 "(PID %u), sending signal %d\n",
2682 i, pid_nr(pid), sig);
2683 kill_pid(pid, sig, 1);
2684 any++;
2685 for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
2686 pid = dd->ipath_pd[i]->port_subpid[sub];
2687 if (!pid)
2688 continue;
2689 dev_info(&dd->pcidev->dev, "sub-context "
2690 "%d:%d in use (PID %u), sending "
2691 "signal %d\n", i, sub, pid_nr(pid), sig);
2692 kill_pid(pid, sig, 1);
2693 any++;
2694 }
2695 }
2696 spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
2697 return any;
2698}
2699
2700static void ipath_hol_signal_down(struct ipath_devdata *dd)
2701{
2702 if (ipath_signal_procs(dd, SIGSTOP))
2703 ipath_dbg("Stopped some processes\n");
2704 ipath_cancel_sends(dd, 1);
2705}
2706
2707
2708static void ipath_hol_signal_up(struct ipath_devdata *dd)
2709{
2710 if (ipath_signal_procs(dd, SIGCONT))
2711 ipath_dbg("Continued some processes\n");
2712}
2713
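/*
 * The link has gone down: stop the user processes and cancel any
 * pending sends to limit head-of-line blocking, then arm the timer
 * that alternately continues and re-stops them while the link stays
 * down (see ipath_hol_event()).
 */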
2721void ipath_hol_down(struct ipath_devdata *dd)
2722{
2723 dd->ipath_hol_state = IPATH_HOL_DOWN;
2724 ipath_hol_signal_down(dd);
2725 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2726 dd->ipath_hol_timer.expires = jiffies +
2727 msecs_to_jiffies(ipath_hol_timeout_ms);
2728 mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
2729}
2730
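/*
 * The link is back up: resume any processes stopped by ipath_hol_down().
 * The timer handler notices IPATH_HOL_UP and stops rescheduling itself.
 */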
2736void ipath_hol_up(struct ipath_devdata *dd)
2737{
2738 ipath_hol_signal_up(dd);
2739 dd->ipath_hol_state = IPATH_HOL_UP;
2740}
2741
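/*
 * Timer handler for the head-of-line (HoL) state machine: while the
 * link remains down, alternately stop and continue the user processes;
 * once the link is up the timer is simply not rescheduled.
 */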
2748void ipath_hol_event(unsigned long opaque)
2749{
2750 struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
2751
2752 if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
2753 && dd->ipath_hol_state != IPATH_HOL_UP) {
2754 dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
2755 ipath_dbg("Stopping processes\n");
2756 ipath_hol_signal_down(dd);
2757 } else {
2758 dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
2759 ipath_dbg("Continuing processes\n");
2760 ipath_hol_signal_up(dd);
2761 }
2762 if (dd->ipath_hol_state == IPATH_HOL_UP)
2763 ipath_dbg("link's up, don't resched timer\n");
2764 else {
2765 dd->ipath_hol_timer.expires = jiffies +
2766 msecs_to_jiffies(ipath_hol_timeout_ms);
2767 mod_timer(&dd->ipath_hol_timer,
2768 dd->ipath_hol_timer.expires);
2769 }
2770}
2771
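/* set the receive polarity-inversion field in the XGXS config register */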
2772int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2773{
2774 u64 val;
2775
2776 if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
2777 return -1;
2778 if (dd->ipath_rx_pol_inv != new_pol_inv) {
2779 dd->ipath_rx_pol_inv = new_pol_inv;
2780 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2781 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
2782 INFINIPATH_XGXS_RX_POL_SHIFT);
2783 val |= ((u64)dd->ipath_rx_pol_inv) <<
2784 INFINIPATH_XGXS_RX_POL_SHIFT;
2785 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2786 }
2787 return 0;
2788}
2789
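/*
 * Enable reporting of the PIO "arm launch" error: clear any latched
 * SPIOARMLAUNCH error, then set it in the error mask.
 * ipath_disable_armlaunch() below does the reverse.
 */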
2799void ipath_enable_armlaunch(struct ipath_devdata *dd)
2800{
2801 dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
2802 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
2803 INFINIPATH_E_SPIOARMLAUNCH);
2804 dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
2805 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2806 dd->ipath_errormask);
2807}
2808
2809void ipath_disable_armlaunch(struct ipath_devdata *dd)
2810{
	/* drop SPIOARMLAUNCH from the masked-error set and the error mask */
2812 dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
2813 dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
2814 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2815 dd->ipath_errormask);
2816}
2817
2818module_init(infinipath_init);
2819module_exit(infinipath_cleanup);
2820