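/*
 * QLogic InfiniPath host channel adapter driver: device probe and
 * teardown, PIO send-buffer management, the kernel receive path, and
 * IB link-state control.
 */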
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

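/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */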
const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

unsigned ipath_mtu4096 = 1;
module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");

static unsigned ipath_hol_timeout_ms = 13000;
module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
MODULE_PARM_DESC(hol_timeout_ms,
	"duration of user app suspension after link failure");

unsigned ipath_linkrecovery = 1;
module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

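/*
 * Names for the IB link/training states, indexed by the link training
 * state field of kr_ibcstatus.
 */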
const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",
	"LState7",
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"CfgTxRevLane",
	"RecovWaitRmt",
	"RecovIdle",

	"CfgEnhanced",
	"CfgTest",
	"CfgWaitRmtTest",
	"CfgWaitCfgEnhanced",
	"SendTS_T",
	"SendTstIdles",
	"RcvTS_T",
	"SendTst_TS1s",
	"LTState18", "LTState19", "LTState1A", "LTState1B",
	"LTState1C", "LTState1D", "LTState1E", "LTState1F"
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd

#define STATUS_TIMEOUT 60

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
	.driver = {
		.groups = ipath_driver_attr_groups,
	},
};

static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}

static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vzalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}

int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	int maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}
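/*
 * Weak placeholders: architectures with per-arch support provide real
 * write-combining setup; without it, PIO bandwidth will be poor.
 */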
int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}
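/*
 * Sanity-check PIO send bandwidth to the first available buffer, and
 * complain if it is unexpectedly low.
 */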
static void ipath_verify_pioperf(struct ipath_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u32 *addr;
	u64 msecs, emsecs;

	piobuf = ipath_getpiobuf(dd, 0, &pbnum);
	if (!piobuf) {
		dev_info(&dd->pcidev->dev,
			 "No PIObufs for checking perf, skipping\n");
		return;
	}

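	/*
	 * Enough for a reasonable test, less than the PIO buffer size,
	 * and likely a multiple of the store-buffer length.
	 */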
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		dev_info(&dd->pcidev->dev,
			 "Couldn't get memory for checking PIO perf,"
			 " skipping\n");
		goto done;
	}

	preempt_disable();
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	ipath_disable_armlaunch(dd);

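	/* zero-length PBC, so no dwords are actually sent */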
	if (dd->ipath_flags & IPATH_HAS_PBC_CNT)
		writeq(1UL << 63, piobuf);
	else
		writeq(0, piobuf);
	ipath_flush_wc();

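	/*
	 * Copy for roughly 5 msec; each pass moves cnt (1024) bytes, so
	 * passes per msec approximates MiB/sec, and the error below fires
	 * when the rate is under about 1 GiB/sec.
	 */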
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		__iowrite32_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	if (lcnt < (emsecs * 1024U))
		ipath_dev_err(dd,
			"Performance problem: bandwidth to PIO buffers is "
			"only %u MiB/sec\n",
			lcnt / (u32) emsecs);
	else
		ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
			lcnt / (u32) emsecs);

	preempt_enable();

	vfree(addr);

done:
	ipath_disarm_piobufs(dd, pbnum, 1);
	ipath_enable_armlaunch(dd);
}

static void cleanup_device(struct ipath_devdata *dd);

static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	ret = pci_enable_device(pdev);
	if (ret) {
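		/*
		 * The enable can fail for several reasons (e.g. the BAR
		 * was never restored after a chip reset, or something
		 * else claimed the device); nothing more can be done with
		 * this unit, so report the error and give up on it.
		 */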
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
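		/*
		 * If the 64-bit DMA mask is refused, fall back to 32 bits,
		 * and set the consistent mask to match.
		 */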
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			dev_info(&pdev->dev,
				 "Unable to set DMA mask for unit %u: %d\n",
				 dd->ipath_unit, ret);
			goto bail_regions;
		} else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (ret)
				dev_info(&pdev->dev,
					 "Unable to set DMA consistent mask "
					 "for unit %u: %d\n",
					 dd->ipath_unit, ret);
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret)
			dev_info(&pdev->dev,
				 "Unable to set DMA consistent mask "
				 "for unit %u: %d\n",
				 dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

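	/*
	 * Save BARs to rewrite after a device reset; the chip reset code
	 * relies on these saved copies.
	 */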
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;
	dd->ipath_vendorid = ent->vendor;

	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
		ipath_init_iba6110_funcs(dd);
		break;

	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		return -ENODEV;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
			   j, &pdev->resource[j],
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	dd->ipath_pcirev = pdev->revision;

#if defined(__powerpc__)
	dd->ipath_kregbase = __ioremap(addr, len,
				       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;

	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

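	/*
	 * dd->ipath_irq was set by dd->ipath_f_bus(); zero means no
	 * interrupt was ever assigned, so interrupts cannot work.
	 */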
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);
	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_verify_pioperf(dd);

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	goto bail;

bail_irqsetup:
	cleanup_device(dd);

	if (dd->ipath_irq)
		dd->ipath_f_free_irq(dd);

	if (dd->ipath_f_cleanup)
		dd->ipath_f_cleanup(dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}

static void cleanup_device(struct ipath_devdata *dd)
{
	int port;
	struct ipath_portdata **tmp;
	unsigned long flags;

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
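			/*
			 * Clear the register-map pointers so any further
			 * accesses fail until re-init; the mapping itself
			 * is unmapped later, in ipath_remove_one().
			 */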
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_spectriggerhit)
		dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
			 dd->ipath_spectriggerhit);

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
				  dd->ipath_pd[0]->port_rcvhdrq_size,
				  dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);

		dd->ipath_egrtidbase = NULL;
	}
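	/*
	 * Free any port data still allocated.  Detach the array under the
	 * lock first, so nothing else can find it while it is being freed.
	 */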
	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	tmp = dd->ipath_pd;
	dd->ipath_pd = NULL;
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = tmp[port];
		tmp[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(tmp);
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

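	/*
	 * Disable the IB link and interrupts, and flush any pending work,
	 * before tearing everything down.
	 */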
	ipath_shutdown_device(dd);

	flush_workqueue(ib_wq);

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);

	if (dd->ipath_f_cleanup)
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}

DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);
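/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers so anything in flight is aborted
 * rather than sent.
 */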
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	unsigned long flags;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	for (i = first; i < last; i++) {
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl | INFINIPATH_S_DISARM |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}

	ipath_force_pio_avail_update(dd);
}
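/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur;
 * returns 0 on success, -ETIMEDOUT otherwise.
 */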
int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
			     char *buf, size_t blen)
{
	static const struct {
		ipath_err_t err;
		const char *msg;
	} errs[] = {
		{ INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
		{ INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
		{ INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
		{ INFINIPATH_E_SDMABASE, "SDmaBase" },
		{ INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
		{ INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
		{ INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
		{ INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
		{ INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
		{ INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
		{ INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
		{ INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
	};
	int i;
	int expected;
	size_t bidx = 0;

	for (i = 0; i < ARRAY_SIZE(errs); i++) {
		expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
			test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
		if ((err & errs[i].err) && !expected)
			bidx += snprintf(buf + bidx, blen - bidx,
					 "%s ", errs[i].msg);
	}
}
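/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so caller can decide what to print with the string.
 */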
int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
		     ipath_err_t err)
{
	int iserr = 1;
	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0;
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				err &= INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
		    !(err & (INFINIPATH_E_RVCRC | INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
		strlcat(buf, "sendspecialtrigger ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
	if (err & INFINIPATH_E_SDMAERRS)
		decode_sdma_errs(dd, err, buf, blen);
	if (err & INFINIPATH_E_INVALIDEEPCMD)
		strlcat(buf, "invalideepromcmd ", blen);
done:
	return iserr;
}
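/*
 * get_rhf_errstring - decode RHF errors
 * @err: the error flags
 * @msg: the output message buffer
 * @len: the length of the output message buffer
 */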
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}
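/*
 * Return the eager buffer corresponding to bufnum for port 0; kernel
 * (port 0) traffic uses skbs rather than mapped user eager buffers.
 */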
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}

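/**
 * ipath_alloc_skb - allocate an skb large enough for a full IB packet
 * @dd: the infinipath device
 * @gfp_mask: the allocation flags
 */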
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

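	/*
	 * Allocate the maximum IB packet length plus 4 bytes of slack;
	 * for chips with 4-byte TIDs, add enough extra that the data can
	 * be aligned to a 2KB boundary below.
	 */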
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID)
		len += 2047;

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}

bail:
	return skb;
}

static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     __le32 *rhf_addr,
			     struct ipath_message_header *hdr)
{
	char emsg[128];

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type(rhf_addr),
		   ipath_hdrget_length_in_bytes(rhf_addr),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}
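/*
 * ipath_kreceive - receive a packet
 * @pd: the infinipath port
 *
 * Called from interrupt handler for errors or receive interrupt.
 */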
void ipath_kreceive(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;
	int last;

	l = pd->port_head;
	rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
	if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
		u32 seq = ipath_hdrget_seq(rhf_addr);

		if (seq != pd->port_seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = ipath_get_rcvhdrtail(pd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();
	}

reloop:
	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
		eflags = ipath_hdrget_err_flags(rhf_addr);
		etype = ipath_hdrget_rcv_type(rhf_addr);
		tlen = ipath_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
		    ipath_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = ipath_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail);
		}

		if (etype != RCVHQ_RCV_TYPE_NON_KD &&
		    etype != RCVHQ_RCV_TYPE_ERROR &&
		    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION)
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
		} else if (etype == RCVHQ_RCV_TYPE_EAGER) {
			u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
			u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, opcode, qp, tlen);
		} else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) >> 24);
		else {
			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
				   " %x, len %x hdrq+%x rhf: %Lx\n",
				   etail, tlen, l, (unsigned long long)
				   le64_to_cpu(*(__le64 *) rhf_addr));
			if (ipath_debug & __IPATH_ERRPKTDBG) {
				u32 j, *d, dw = rsize - 2;
				if (rsize > (tlen >> 2))
					dw = tlen >> 2;
				d = (u32 *)hdr;
				printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
				       dw);
				for (j = 0; j < dw; j++)
					printk(KERN_DEBUG "%8x%s", d[j],
					       (j % 8) == 7 ? "\n" : " ");
				printk(KERN_DEBUG ".\n");
			}
		}
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		rhf_addr = (__le32 *) pd->port_rcvhdrq +
			l + dd->ipath_rhf_offset;
		if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
			u32 seq = ipath_hdrget_seq(rhf_addr);

			if (++pd->port_seq_cnt > 13)
				pd->port_seq_cnt = 1;
			if (seq != pd->port_seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;

		if (last || !(i & 0xf)) {
			u64 lval = l;

			if (last)
				lval |= dd->ipath_rhdrhead_intr_off;
			ipath_write_ureg(dd, ur_rcvhdrhead, lval,
					 pd->port_port);
			if (updegr) {
				ipath_write_ureg(dd, ur_rcvegrindexhead,
						 etail, pd->port_port);
				updegr = 0;
			}
		}
	}

	if (!dd->ipath_rhdrhead_intr_off && !reloop &&
	    !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		u32 hqtail = ipath_get_rcvhdrtail(pd);
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1;
			goto reloop;
		}
	}

	pkttot += i;

	pd->port_head = l;

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

bail:;
}
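/*
 * ipath_update_pio_bufs - update shadow copy of the PIO availability
 * register map
 * @dd: the infinipath device
 *
 * Bring the shadow send-buffer-available bitmap up to date with the
 * chip's DMAed copy, so buffer allocation can work from the shadow.
 */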
static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	int i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
		else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = dd->ipath_pioavailkernel[i] &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
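/*
 * Reset the shadow from the chip's DMAed copy, marking all buffers
 * that are not available to the kernel as busy.
 */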
static void ipath_reset_availshadow(struct ipath_devdata *dd)
{
	int i, im;
	unsigned long flags;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < dd->ipath_pioavregs; i++) {
		u64 val, oldval;

		im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
			i ^ 1 : i;
		val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
		oldval = dd->ipath_pioavailshadow[i];
		dd->ipath_pioavailshadow[i] = val |
			((~dd->ipath_pioavailkernel[i] <<
			  INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
			 0xaaaaaaaaaaaaaaaaULL);
		if (oldval != dd->ipath_pioavailshadow[i])
			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
				  i, (unsigned long long) oldval,
				  dd->ipath_pioavailshadow[i]);
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
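/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * Set once per load; subsequent calls must request the same size.
 */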
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}
	return ret;
}
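/*
 * Debugging and stats gathering for the (rare) case where no PIO
 * buffers are available.
 */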
static noinline void no_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long *shadow = dd->ipath_pioavailshadow;
	__le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;

	dd->ipath_upd_pio_shadow = 1;

	ipath_stats.sps_nopiobufs++;
	if (!(++dd->ipath_consec_nopiobuf % 100000)) {
		ipath_force_pio_avail_update(dd);
		ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
			  "%llx %llx %llx %llx\n"
			  "ipath shadow: %lx %lx %lx %lx\n",
			  dd->ipath_consec_nopiobuf,
			  (unsigned long)get_cycles(),
			  (unsigned long long) le64_to_cpu(dma[0]),
			  (unsigned long long) le64_to_cpu(dma[1]),
			  (unsigned long long) le64_to_cpu(dma[2]),
			  (unsigned long long) le64_to_cpu(dma[3]),
			  shadow[0], shadow[1], shadow[2], shadow[3]);
		if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
		    (sizeof(shadow[0]) * 4 * 4))
			ipath_dbg("2nd group: dmacopy: "
				  "%llx %llx %llx %llx\n"
				  "ipath shadow: %lx %lx %lx %lx\n",
				  (unsigned long long)le64_to_cpu(dma[4]),
				  (unsigned long long)le64_to_cpu(dma[5]),
				  (unsigned long long)le64_to_cpu(dma[6]),
				  (unsigned long long)le64_to_cpu(dma[7]),
				  shadow[4], shadow[5], shadow[6], shadow[7]);

		ipath_reset_availshadow(dd);
	}
}
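/*
 * Common code for PIO buffer allocation: scan the availability shadow
 * for a free buffer in [first, last), starting at firsti, updating the
 * shadow from the chip's DMAed copy when the scan comes up empty.
 */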
static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
	u32 *pbufnum, u32 first, u32 last, u32 firsti)
{
	int i, j, updated = 0;
	unsigned piobcnt;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = last - first;
	if (dd->ipath_upd_pio_shadow) {
		ipath_update_pio_bufs(dd);
		updated++;
		i = first;
	} else
		i = firsti;
rescan:
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (j = 0; j < piobcnt; j++, i++) {
		if (i >= last)
			i = first;
		if (__test_and_set_bit((2 * i) + 1, shadow))
			continue;
		__change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == piobcnt) {
		if (!updated) {
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		} else if (updated == 1 && piobcnt <=
			((dd->ipath_sendctrl
			  >> INFINIPATH_S_UPDTHRESH_SHIFT) &
			 INFINIPATH_S_UPDTHRESH_MASK)) {
			ipath_force_pio_avail_update(dd);
			ipath_update_pio_bufs(dd);
			updated++;
			i = first;
			goto rescan;
		}

		no_pio_bufs(dd);
		buf = NULL;
	} else {
		if (i < dd->ipath_piobcnt2k)
			buf = (u32 __iomem *) (dd->ipath_pio2kbase +
					       i * dd->ipath_palign);
		else
			buf = (u32 __iomem *)
				(dd->ipath_pio4kbase +
				 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
		if (pbufnum)
			*pbufnum = i;
	}

	return buf;
}
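/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @plen: the size of the PIO buffer needed in 32-bit words
 * @pbufnum: the buffer number is placed here
 */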
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
{
	u32 __iomem *buf;
	u32 pnum, nbufs;
	u32 first, lasti;

	if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
		first = dd->ipath_piobcnt2k;
		lasti = dd->ipath_lastpioindexl;
	} else {
		first = 0;
		lasti = dd->ipath_lastpioindex;
	}
	nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
	buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);

	if (buf) {
		if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
			dd->ipath_lastpioindexl = pnum + 1;
		else
			dd->ipath_lastpioindex = pnum + 1;
		if (dd->ipath_upd_pio_shadow)
			dd->ipath_upd_pio_shadow = 0;
		if (dd->ipath_consec_nopiobuf)
			dd->ipath_consec_nopiobuf = 0;
		ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
			   pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
		if (pbufnum)
			*pbufnum = pnum;
	}
	return buf;
}
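/**
 * ipath_chg_pioavailkernel - change which send buffers are available
 * for kernel use
 * @dd: the infinipath device
 * @start: the starting send buffer number
 * @len: the number of send buffers
 * @avail: true if the buffers are available for kernel use, false otherwise
 */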
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
			      unsigned len, int avail)
{
	unsigned long flags;
	unsigned end, cnt = 0;

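	/* There are two bits per send buffer (busy and generation) */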
	start *= 2;
	end = start + len * 2;

	spin_lock_irqsave(&ipath_pioavail_lock, flags);

	while (start < end) {
		if (avail) {
			unsigned long dma;
			int i, im;

			i = start / BITS_PER_LONG;
			im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
				i ^ 1 : i;
			__clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
				    + start, dd->ipath_pioavailshadow);
			dma = (unsigned long) le64_to_cpu(
				dd->ipath_pioavailregs_dma[im]);
			if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
				      + start) % BITS_PER_LONG, &dma))
				__set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					  + start, dd->ipath_pioavailshadow);
			else
				__clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
					    + start, dd->ipath_pioavailshadow);
			__set_bit(start, dd->ipath_pioavailkernel);
		} else {
			__set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
				  dd->ipath_pioavailshadow);
			__clear_bit(start, dd->ipath_pioavailkernel);
		}
		start += 2;
	}

	if (dd->ipath_pioupd_thresh) {
		end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
		cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (!avail && len < cnt)
		cnt = len;
	if (cnt < dd->ipath_pioupd_thresh) {
		dd->ipath_pioupd_thresh = cnt;
		ipath_dbg("Decreased pio update threshold to %u\n",
			  dd->ipath_pioupd_thresh);
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
					<< INFINIPATH_S_UPDTHRESH_SHIFT);
		dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
			<< INFINIPATH_S_UPDTHRESH_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}
}
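/**
 * ipath_create_rcvhdrq - create a receive header queue
 * @dd: the infinipath device
 * @pd: the port data
 *
 * This must be contiguous memory (from an i/o perspective), and must
 * be DMA'able (which means for some systems, it will go through an
 * IOMMU, or be forced into a low address range).
 */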
int ipath_create_rcvhdrq(struct ipath_devdata *dd,
			 struct ipath_portdata *pd)
{
	int ret = 0;

	if (!pd->port_rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags = GFP_USER | __GFP_COMP;
		int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
				sizeof(u32), PAGE_SIZE);

		pd->port_rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
			gfp_flags);

		if (!pd->port_rcvhdrq) {
			ipath_dev_err(dd, "attempt to allocate %d bytes "
				      "for port %u rcvhdrq failed\n",
				      amt, pd->port_port);
			ret = -ENOMEM;
			goto bail;
		}

		if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
			pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				GFP_KERNEL);
			if (!pd->port_rcvhdrtail_kvaddr) {
				ipath_dev_err(dd, "attempt to allocate 1 page "
					      "for port %u rcvhdrqtailaddr "
					      "failed\n", pd->port_port);
				ret = -ENOMEM;
				dma_free_coherent(&dd->pcidev->dev, amt,
						  pd->port_rcvhdrq,
						  pd->port_rcvhdrq_phys);
				pd->port_rcvhdrq = NULL;
				goto bail;
			}
			pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
			ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
				   "physical\n", pd->port_port,
				   (unsigned long long) phys_hdrqtail);
		}

		pd->port_rcvhdrq_size = amt;

		ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
			   "for port %u rcvhdr Q\n",
			   amt >> PAGE_SHIFT, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_phys,
			   (unsigned long) pd->port_rcvhdrq_size,
			   pd->port_port);
	} else
		ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
			   "hdrtailaddr@%p %llx physical\n",
			   pd->port_port, pd->port_rcvhdrq,
			   (unsigned long long) pd->port_rcvhdrq_phys,
			   pd->port_rcvhdrtail_kvaddr, (unsigned long long)
			   pd->port_rcvhdrqtailaddr_phys);

	memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
	if (pd->port_rcvhdrtail_kvaddr)
		memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);

	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			      pd->port_port, pd->port_rcvhdrqtailaddr_phys);
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      pd->port_port, pd->port_rcvhdrq_phys);

bail:
	return ret;
}
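/*
 * ipath_cancel_sends - disarm all send buffers
 * @dd: the infinipath device
 * @restore_sendctrl: re-enable the send path when done
 *
 * Used when sends must be aborted, e.g. when the link goes down.
 */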
void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
{
	unsigned long flags;

	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
		ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
		goto bail;
	}

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
		int skip_cancel;
		unsigned long *statp = &dd->ipath_sdma_status;

		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		skip_cancel =
			test_and_set_bit(IPATH_SDMA_ABORTING, statp)
			&& !test_bit(IPATH_SDMA_DISABLED, statp);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
		if (skip_cancel)
			goto bail;
	}

	ipath_dbg("Cancelling all in-progress send buffers\n");

	dd->ipath_lastcancel = jiffies + HZ / 2;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
				| INFINIPATH_S_PIOENABLE);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl | INFINIPATH_S_ABORT);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	ipath_disarm_piobufs(dd, 0,
			     dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);

	if (restore_sendctrl) {
		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
			INFINIPATH_S_PIOENABLE;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}

	if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
	    !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
	    test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
		spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
		dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
		dd->ipath_sdma_reset_wait = 200;
		if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
			tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
		spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
	}
bail:;
}
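/*
 * Force an update of the in-memory copy of the pioavail registers by
 * toggling the buffer-available-update enable in sendctrl; the scratch
 * register reads ensure each write has reached the chip.
 */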
void ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			dd->ipath_sendctrl);
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	}
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}

static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
				int linitcmd)
{
	u64 mod_wd;
	static const char *what[4] = {
		[0] = "NOP",
		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
	};

	if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
		preempt_disable();
		dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
		preempt_enable();
	} else if (linitcmd) {
		preempt_disable();
		dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
		preempt_enable();
	}

	mod_wd = (linkcmd << dd->ibcc_lc_shift) |
		(linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
	ipath_cdbg(VERBOSE,
		"Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
		dd->ipath_unit, what[linkcmd], linitcmd,
		ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
			ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl | mod_wd);
	(void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
}

int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN_ONLY:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				    INFINIPATH_IBCC_LINKINITCMD_POLL);
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				    INFINIPATH_IBCC_LINKINITCMD_SLEEP);
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE);
		ret = 0;
		goto bail;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
		lstate = IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINK_LOOPBACK:
		dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);

		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_OFF);
		ret = 0;
		goto bail;

	case IPATH_IB_LINK_EXTERNAL:
		dev_info(&dd->pcidev->dev,
			 "Disabling IB local loopback (normal)\n");
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_ON);
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		ret = 0;
		goto bail;

	case IPATH_IB_LINK_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
					     IPATH_IB_HRTBT_ON);
		goto bail;

	case IPATH_IB_LINK_NO_HRTBT:
		ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
					     IPATH_IB_HRTBT_OFF);
		goto bail;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}
	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
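/**
 * ipath_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  We don't deal with what happens
 * to programs that are already running when the size changes.
 */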
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    (arg != 4096 || !ipath_mtu4096)) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		if (piosize != dd->ipath_init_ibmaxlen) {
			if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
				piosize = dd->ipath_init_ibmaxlen;
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		u64 ibc = dd->ipath_ibcctrl, ibdw;

		dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
		ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 dd->ibcc_mpl_shift);
		ibc |= ibdw << dd->ibcc_mpl_shift;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}

int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
{
	dd->ipath_lid = lid;
	dd->ipath_lmc = lmc;

	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
		(~((1U << lmc) - 1)) << 16);

	dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);

	return 0;
}
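/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Registers that vary with the chip implementation constants (port)
 * use this routine.
 */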
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}
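/*
 * The following deal with the "obviously simple" task of overriding
 * the state of the LEDs, which normally indicate link physical and
 * logical status.  The complications arise in dealing with different
 * hardware mappings and the board-dependent routine being called from
 * interrupts.
 */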
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)

#define LED_OVER_BOTH_OFF (8)

static void ipath_run_led_override(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
	int timeoff;
	int pidx;
	u64 lstate, ltstate, val;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	pidx = dd->ipath_led_override_phase++ & 1;
	dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
	timeoff = dd->ipath_led_override_timeoff;

	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
	ltstate = ipath_ib_linktrstate(dd, val);
	lstate = ipath_ib_linkstate(dd, val);

	dd->ipath_f_setextled(dd, lstate, ltstate);
	mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
}

void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
{
	int timeoff, freq;

	if (!(dd->ipath_flags & IPATH_INITTED))
		return;

	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		dd->ipath_led_override_vals[0] = val & 0xF;
		dd->ipath_led_override_vals[1] = val & 0xF;
	}
	dd->ipath_led_override_timeoff = timeoff;

	if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
		init_timer(&dd->ipath_led_override_timer);
		dd->ipath_led_override_timer.function =
			ipath_run_led_override;
		dd->ipath_led_override_timer.data = (unsigned long) dd;
		dd->ipath_led_override_timer.expires = jiffies + 1;
		add_timer(&dd->ipath_led_override_timer);
	} else
		atomic_dec(&dd->ipath_led_override_timer_active);
}
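/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by ipath_init_chip(dd, 1)
 */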
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	unsigned long flags;

	ipath_dbg("Shutting down the device\n");

	ipath_hol_up(dd);

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		teardown_sdma(dd);

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	dd->ipath_sendctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	udelay(5);

	dd->ipath_f_setextled(dd, 0, 0);

	ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
	ipath_cancel_sends(dd, 0);

	signal_ib_event(dd, IB_EVENT_PORT_ERR);

	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	dd->ipath_f_quiet_serdes(dd);

	del_timer_sync(&dd->ipath_hol_timer);
	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}
	if (dd->ipath_intrchk_timer.data) {
		del_timer_sync(&dd->ipath_intrchk_timer);
		dd->ipath_intrchk_timer.data = 0;
	}
	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);

	ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
	ipath_update_eeprom_log(dd);
}
2421
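/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * Frees any memory allocated for the port, and frees @pd itself.
 * This must not touch anything that would affect a simultaneous
 * re-allocation of port data, because it is called after ipath_mutex
 * is released (and can be called from reinit as well).  It never
 * changes any chip state or global driver state; the one exception
 * is freeing the port 0 skb info, which is global to the device.
 */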
void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	if (!pd)
		return;

	if (pd->port_rcvhdrq) {
		ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
			   "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
			   (unsigned long) pd->port_rcvhdrq_size);
		dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
				  pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
		pd->port_rcvhdrq = NULL;
		if (pd->port_rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  pd->port_rcvhdrtail_kvaddr,
					  pd->port_rcvhdrqtailaddr_phys);
			pd->port_rcvhdrtail_kvaddr = NULL;
		}
	}
	if (pd->port_port && pd->port_rcvegrbuf) {
		unsigned e;

		for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
			void *base = pd->port_rcvegrbuf[e];
			size_t size = pd->port_rcvegrbuf_size;

			ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
				   "chunk %u/%u\n", base,
				   (unsigned long) size,
				   e, pd->port_rcvegrbuf_chunks);
			dma_free_coherent(&dd->pcidev->dev, size,
					  base, pd->port_rcvegrbuf_phys[e]);
		}
		kfree(pd->port_rcvegrbuf);
		pd->port_rcvegrbuf = NULL;
		kfree(pd->port_rcvegrbuf_phys);
		pd->port_rcvegrbuf_phys = NULL;
		pd->port_rcvegrbuf_chunks = 0;
	} else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
		unsigned e;
		struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;

		dd->ipath_port0_skbinfo = NULL;
		ipath_cdbg(VERBOSE, "free closed port %d "
			   "ipath_port0_skbinfo @ %p\n", pd->port_port,
			   skbinfo);
		for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
			if (skbinfo[e].skb) {
				pci_unmap_single(dd->pcidev, skbinfo[e].phys,
						 dd->ipath_ibmaxlen,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(skbinfo[e].skb);
			}
		vfree(skbinfo);
	}
	kfree(pd->port_tid_pg_list);
	vfree(pd->subport_uregbase);
	vfree(pd->subport_rcvegrbuf);
	vfree(pd->subport_rcvhdr_base);
	kfree(pd);
}

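/*
 * Module initialization: the unit table must be set up before the
 * driver is registered with the PCI subsystem, since probe can run
 * as soon as registration completes.  Failures unwind in reverse.
 */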
static int __init infinipath_init(void)
{
	int ret;

	if (ipath_debug & __IPATH_DBG)
		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_pci;
	}

	goto bail;

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}

static void __exit infinipath_cleanup(void)
{
	ipath_exit_ipathfs();

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}

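/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not the reset is successful, we attempt to re-initialize
 * the chip (that is, much like a driver unload/reload).  We clear the
 * INITTED flag so that the various entry points will fail until we
 * reinitialize.  For now, we only allow this if no user ports are open
 * that use chip resources.
 */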
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);
	unsigned long flags;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (atomic_read(&dd->ipath_led_override_timer_active)) {
		/* Need to stop LED timer, _then_ shut off LEDs */
		del_timer_sync(&dd->ipath_led_override_timer);
		atomic_set(&dd->ipath_led_override_timer_active, 0);
	}

	/* Shut off LEDs after we are sure timer is not running */
	dd->ipath_led_override = LED_OVER_BOTH_OFF;
	dd->ipath_f_setextled(dd, 0, 0);

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	if (dd->ipath_pd)
		for (i = 1; i < dd->ipath_cfgports; i++) {
			if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
				continue;
			spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
			ipath_dbg("unit %u port %d is in use "
				  "(PID %u cmd %s), can't reset\n",
				  unit, i,
				  pid_nr(dd->ipath_pd[i]->port_pid),
				  dd->ipath_pd[i]->port_comm);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

	if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
		teardown_sdma(dd);

	dd->ipath_flags &= ~IPATH_INITTED;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
	ret = dd->ipath_f_reset(dd);
	if (ret == 1) {
		ipath_dbg("Reinitializing unit %u after reset attempt\n",
			  unit);
		ret = ipath_init_chip(dd, 1);
	} else
		ret = -EAGAIN;
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}

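/*
 * Send a signal to all the processes that have the driver open
 * through the normal interfaces (i.e. everything except the diags
 * interface).  Returns the number of signalled processes.
 */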
static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
{
	int i, sub, any = 0;
	struct pid *pid;
	unsigned long flags;

	if (!dd->ipath_pd)
		return 0;

	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	for (i = 1; i < dd->ipath_cfgports; i++) {
		if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
			continue;
		pid = dd->ipath_pd[i]->port_pid;
		if (!pid)
			continue;

		dev_info(&dd->pcidev->dev, "context %d in use "
			 "(PID %u), sending signal %d\n",
			 i, pid_nr(pid), sig);
		kill_pid(pid, sig, 1);
		any++;
		for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
			pid = dd->ipath_pd[i]->port_subpid[sub];
			if (!pid)
				continue;
			dev_info(&dd->pcidev->dev, "sub-context "
				 "%d:%d in use (PID %u), sending "
				 "signal %d\n", i, sub, pid_nr(pid), sig);
			kill_pid(pid, sig, 1);
			any++;
		}
	}
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
	return any;
}

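/*
 * Suspend (SIGSTOP) or resume (SIGCONT) all user processes with the
 * device open; on the down side we also flush any pending sends, so
 * nothing head-of-line blocks while the link is unusable.
 */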
static void ipath_hol_signal_down(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGSTOP))
		ipath_dbg("Stopped some processes\n");
	ipath_cancel_sends(dd, 1);
}

static void ipath_hol_signal_up(struct ipath_devdata *dd)
{
	if (ipath_signal_procs(dd, SIGCONT))
		ipath_dbg("Continued some processes\n");
}

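/*
 * link is down, so stop any user processes and flush pending sends
 * to prevent HoL blocking, then start the HoL timer that
 * periodically continues, then re-stops, the processes, so they can
 * be aware of link status, if they care.
 */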
void ipath_hol_down(struct ipath_devdata *dd)
{
	dd->ipath_hol_state = IPATH_HOL_DOWN;
	ipath_hol_signal_down(dd);
	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
	dd->ipath_hol_timer.expires = jiffies +
		msecs_to_jiffies(ipath_hol_timeout_ms);
	mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
}

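/*
 * link is up, so continue any user processes.  The timer is left
 * running, if set; it will nop when it sees the link is up.
 */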
void ipath_hol_up(struct ipath_devdata *dd)
{
	ipath_hol_signal_up(dd);
	dd->ipath_hol_state = IPATH_HOL_UP;
}

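/*
 * Toggle the running/not running state of user processes to prevent
 * HoL blocking on chip resources, while still allowing user processes
 * to do their link-down special-case handling.
 * Should only be called via the timer.
 */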
void ipath_hol_event(unsigned long opaque)
{
	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;

	if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
		&& dd->ipath_hol_state != IPATH_HOL_UP) {
		dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
		ipath_dbg("Stopping processes\n");
		ipath_hol_signal_down(dd);
	} else { /* may do "extra" up, if timer fired after link came up */
		dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
		ipath_dbg("Continuing processes\n");
		ipath_hol_signal_up(dd);
	}
	if (dd->ipath_hol_state == IPATH_HOL_UP)
		ipath_dbg("link's up, don't resched timer\n");
	else {
		dd->ipath_hol_timer.expires = jiffies +
			msecs_to_jiffies(ipath_hol_timeout_ms);
		mod_timer(&dd->ipath_hol_timer,
			dd->ipath_hol_timer.expires);
	}
}

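/**
 * ipath_set_rx_pol_inv - set receive polarity inversion on the XGXS lanes
 * @dd: the infinipath device
 * @new_pol_inv: the new polarity-inversion mask; must fit within
 *	INFINIPATH_XGXS_RX_POL_MASK
 *
 * Returns 0 on success, -1 if @new_pol_inv is out of range.  The
 * xgxsconfig register is only rewritten when the value actually changes.
 */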
int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
	u64 val;

	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
		return -1;
	if (dd->ipath_rx_pol_inv != new_pol_inv) {
		dd->ipath_rx_pol_inv = new_pol_inv;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= ((u64)dd->ipath_rx_pol_inv) <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}
	return 0;
}

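/*
 * Disable and enable the armlaunch error.  Used for PIO bandwidth
 * testing on chips whose send-buffer checking is count-based rather
 * than trigger-based.  Safe for the driver check, since it's at init.
 * Not completely safe when used for user-mode checking, since some
 * error checking can be lost, but not particularly risky, and only
 * has problematic side effects in the face of very buggy user code.
 * There is no reference counting, but that's also fine given the
 * intended use.
 */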
void ipath_enable_armlaunch(struct ipath_devdata *dd)
{
	dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
			 INFINIPATH_E_SPIOARMLAUNCH);
	dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
}

void ipath_disable_armlaunch(struct ipath_devdata *dd)
{
	/* so we don't re-enable it if it was already masked */
	dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
	dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
			 dd->ipath_errormask);
}

module_init(infinipath_init);
module_exit(infinipath_cleanup);