1#ifndef _QIB_KERNEL_H
2#define _QIB_KERNEL_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/list.h>
47#include <linux/scatterlist.h>
48#include <linux/slab.h>
49#include <linux/io.h>
50#include <linux/fs.h>
51#include <linux/completion.h>
52#include <linux/kref.h>
53#include <linux/sched.h>
54#include <linux/kthread.h>
55#include <rdma/ib_hdrs.h>
56#include <rdma/rdma_vt.h>
57
58#include "qib_common.h"
59#include "qib_verbs.h"
60
61
62#define QIB_CHIP_VERS_MAJ 2U
63
64
65#define QIB_CHIP_VERS_MIN 0U
66
67
68#define QIB_OUI 0x001175
69#define QIB_OUI_LSB 40
70
71
72
73
74
75
76
77
78
/*
 * Driver-wide (all devices) statistics.  A single instance, qib_stats,
 * is declared below and updated from hot paths across the driver.
 */
struct qlogic_ib_stats {
	__u64 sps_ints;		/* interrupts handled */
	__u64 sps_errints;	/* error-type interrupts */
	__u64 sps_txerrs;	/* transmit-side packet errors */
	__u64 sps_rcverrs;	/* receive-side packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported */
	__u64 sps_nopiobufs;	/* times no PIO send buffer was available */
	__u64 sps_ctxts;	/* contexts currently open */
	__u64 sps_lenerrs;	/* packet-length errors */
	__u64 sps_buffull;	/* receive eager-buffer-full events */
	__u64 sps_hdrfull;	/* receive header-queue-full events */
};
91
92extern struct qlogic_ib_stats qib_stats;
93extern const struct pci_error_handlers qib_pci_err_handler;
94
95#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
96
97
98
99
100
101
102#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
103
104
105
106
107
108
109
110#define QIB_EEP_LOG_CNT (4)
/*
 * Which error and hardware-error bits should be counted toward the
 * EEPROM error log; one entry per QIB_EEP_LOG_CNT counter class.
 */
struct qib_eep_log_mask {
	u64 errs_to_log;	/* mask of (software-visible) error bits to log */
	u64 hwerrs_to_log;	/* mask of hardware-error bits to log */
};
115
116
117
118
119
120#ifdef CONFIG_DEBUG_FS
121struct qib_opcode_stats_perctx;
122#endif
123
/*
 * Per-receive-context state.  One instance exists for each kernel or
 * user context; subcontexts of a user context share a single instance.
 */
struct qib_ctxtdata {
	void **rcvegrbuf;		/* eager receive buffer chunks (kernel vaddrs) */
	dma_addr_t *rcvegrbuf_phys;	/* DMA addresses of the chunks above */
	/* receive header queue base (kernel virtual address) */
	void *rcvhdrq;
	/* kernel vaddr where the DMA'ed hdrq tail value lands (see
	 * qib_get_rcvhdrtail() / qib_clear_rcvhdrtail() below) */
	void *rcvhdrtail_kvaddr;
	/* scratch page list used while programming expected-TID receives
	 * — NOTE(review): exact lifetime managed by file_ops, confirm */
	void *tid_pg_list;
	/* per-subcontext event bits polled/cleared from user space */
	unsigned long *user_event_mask;
	/* processes sleeping for receive data or events wait here */
	wait_queue_head_t wait;
	/* DMA address of eager receive memory — NOTE(review): meaning vs
	 * rcvegrbuf_phys[] per-chunk addresses, confirm at allocation site */
	dma_addr_t rcvegr_phys;
	dma_addr_t rcvhdrq_phys;	/* DMA address of rcvhdrq */
	dma_addr_t rcvhdrqtailaddr_phys; /* DMA address of the tail-update word */
	/* NOTE(review): open/reference count for this context — confirm */
	int cnt;
	/* hardware context number of this context */
	unsigned ctxt;
	/* NUMA node this context's memory is tied to (-1 if unset) */
	int node_id;
	/* number of subcontexts (0 means none configured) */
	u16 subctxt_cnt;
	/* id of this subcontext group, chosen by the master process */
	u16 subctxt_id;
	/* number of eager TID entries owned by this context */
	u16 rcvegrcnt;
	/* first eager TID index for this context */
	u16 rcvegr_tid_base;
	/* number of PIO send buffers for this context */
	u32 piocnt;
	/* first PIO buffer number for this context */
	u32 pio_base;
	/* chip offset of this context's PIO buffers */
	u32 piobufs;
	/* how many chunks make up rcvegrbuf[] */
	u32 rcvegrbuf_chunks;
	/* eager buffers per chunk, and log2 thereof for fast indexing */
	u16 rcvegrbufs_perchunk;
	u16 rcvegrbufs_perchunk_shift;
	/* size of each eager-buffer chunk allocation */
	size_t rcvegrbuf_size;
	/* size of the rcvhdrq allocation */
	size_t rcvhdrq_size;
	/* QIB_CTXT_* status bits (see flags near end of file) */
	unsigned long flag;
	/* next expected-TID slot to try; cached per open */
	u32 tidcursor;
	/* statistics: waits and non-waits on receive and PIO paths */
	u32 rcvwait_to;
	u32 piowait_to;
	u32 rcvnowait;
	u32 pionowait;
	/* urgent-packet count and the value last seen by poll */
	u32 urgent;
	u32 urgent_poll;
	/* owning process and any subcontext owners */
	pid_t pid;
	pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
	/* owning process's comm name, for diagnostics */
	char comm[16];
	/* partition keys in use by this context's port */
	u16 pkeys[4];
	/* back-pointers to device and port */
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	/* shadow copies shared with subcontexts via mmap */
	void *subctxt_uregbase;
	void *subctxt_rcvegrbuf;
	void *subctxt_rcvhdr_base;
	/* user software version, from userspace at open */
	u32 userversion;
	/* bitmask of slave subcontexts that are active */
	u32 active_slaves;
	/* poll type requested (urgent vs anyrcv) */
	u16 poll_type;
	/* receive-packet sequence counters (no-DMA-rtail mode) */
	u8 seq_cnt;
	u8 redirect_seq_cnt;
	/* next receive header queue entry to process */
	u32 head;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
	/* per-context, per-opcode statistics for debugfs */
	struct qib_opcode_stats_perctx *opstats;
#endif
};
241
242struct rvt_sge_state;
243
/*
 * One in-flight send-DMA request.  Embedded in larger request types
 * (e.g. qib_verbs_txreq below); tracked on the port's sdma_activelist.
 */
struct qib_sdma_txreq {
	int flags;		/* QIB_SDMA_TXREQ_F_* bits */
	int sg_count;		/* number of mapped scatter/gather entries */
	dma_addr_t addr;	/* DMA address of the mapped payload */
	/* completion callback; second arg is a QIB_SDMA_TXREQ_S_* status */
	void (*callback)(struct qib_sdma_txreq *, int);
	u16 start_idx;		/* first descriptor-queue index used */
	u16 next_descq_idx;	/* descriptor-queue index after this request */
	struct list_head list;	/* linkage on the active list */
};
253
/* Hardware SDMA descriptor: two little-endian 64-bit words. */
struct qib_sdma_desc {
	__le64 qw[2];
};
257
/*
 * Verbs-layer send request: wraps a qib_sdma_txreq with the QP, WQE
 * and header state needed to build and complete a verbs packet.
 */
struct qib_verbs_txreq {
	struct qib_sdma_txreq txreq;	/* embedded SDMA request (must complete first) */
	struct rvt_qp *qp;		/* QP this send belongs to */
	struct rvt_swqe *wqe;		/* originating software WQE, if any */
	u32 dwords;			/* payload length in dwords */
	u16 hdr_dwords;			/* header length in dwords */
	u16 hdr_inx;			/* index of the header buffer used */
	struct qib_pio_header *align_buf; /* bounce buffer for unaligned payloads */
	struct rvt_mregion *mr;		/* memory region to deref on completion */
	struct rvt_sge_state *ss;	/* SGE state for the payload */
};
269
270#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
271#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
272#define QIB_SDMA_TXREQ_F_INTREQ 0x4
273#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
274#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
275
276#define QIB_SDMA_TXREQ_S_OK 0
277#define QIB_SDMA_TXREQ_S_SENDERROR 1
278#define QIB_SDMA_TXREQ_S_ABORTED 2
279#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
280
281
282
283
284
285
286#define QIB_IB_CFG_LIDLMC 0
287#define QIB_IB_CFG_LWID_ENB 2
288#define QIB_IB_CFG_LWID 3
289#define QIB_IB_CFG_SPD_ENB 4
290#define QIB_IB_CFG_SPD 5
291#define QIB_IB_CFG_RXPOL_ENB 6
292#define QIB_IB_CFG_LREV_ENB 7
293#define QIB_IB_CFG_LINKLATENCY 8
294#define QIB_IB_CFG_HRTBT 9
295#define QIB_IB_CFG_OP_VLS 10
296#define QIB_IB_CFG_VL_HIGH_CAP 11
297#define QIB_IB_CFG_VL_LOW_CAP 12
298#define QIB_IB_CFG_OVERRUN_THRESH 13
299#define QIB_IB_CFG_PHYERR_THRESH 14
300#define QIB_IB_CFG_LINKDEFAULT 15
301#define QIB_IB_CFG_PKEYS 16
302#define QIB_IB_CFG_MTU 17
303#define QIB_IB_CFG_LSTATE 18
304#define QIB_IB_CFG_VL_HIGH_LIMIT 19
305#define QIB_IB_CFG_PMA_TICKS 20
306#define QIB_IB_CFG_PORT 21
307
308
309
310
311
312
313#define IB_LINKCMD_DOWN (0 << 16)
314#define IB_LINKCMD_ARMED (1 << 16)
315#define IB_LINKCMD_ACTIVE (2 << 16)
316#define IB_LINKINITCMD_NOP 0
317#define IB_LINKINITCMD_POLL 1
318#define IB_LINKINITCMD_SLEEP 2
319#define IB_LINKINITCMD_DISABLE 3
320
321
322
323
324#define QIB_IB_LINKDOWN 0
325#define QIB_IB_LINKARM 1
326#define QIB_IB_LINKACTIVE 2
327#define QIB_IB_LINKDOWN_ONLY 3
328#define QIB_IB_LINKDOWN_SLEEP 4
329#define QIB_IB_LINKDOWN_DISABLE 5
330
331
332
333
334
335
336
337
338#define QIB_IB_SDR 1
339#define QIB_IB_DDR 2
340#define QIB_IB_QDR 4
341
342#define QIB_DEFAULT_MTU 4096
343
344
345#define QIB_MAX_IB_PORTS 2
346
347
348
349
350#define QIB_IB_TBL_VL_HIGH_ARB 1
351#define QIB_IB_TBL_VL_LOW_ARB 2
352
353
354
355
356
357
358#define QIB_RCVCTRL_TAILUPD_ENB 0x01
359#define QIB_RCVCTRL_TAILUPD_DIS 0x02
360#define QIB_RCVCTRL_CTXT_ENB 0x04
361#define QIB_RCVCTRL_CTXT_DIS 0x08
362#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
363#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
364#define QIB_RCVCTRL_PKEY_ENB 0x40
365#define QIB_RCVCTRL_PKEY_DIS 0x80
366#define QIB_RCVCTRL_BP_ENB 0x0100
367#define QIB_RCVCTRL_BP_DIS 0x0200
368#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
369#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
370
371
372
373
374
375
376
377
378#define QIB_SENDCTRL_DISARM (0x1000)
379#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
380
381#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
382#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
383#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
384#define QIB_SENDCTRL_SEND_DIS (0x20000)
385#define QIB_SENDCTRL_SEND_ENB (0x40000)
386#define QIB_SENDCTRL_FLUSH (0x80000)
387#define QIB_SENDCTRL_CLEAR (0x100000)
388#define QIB_SENDCTRL_DISARM_ALL (0x200000)
389
390
391
392
393
394
395
396
397#define QIBPORTCNTR_PKTSEND 0U
398#define QIBPORTCNTR_WORDSEND 1U
399#define QIBPORTCNTR_PSXMITDATA 2U
400#define QIBPORTCNTR_PSXMITPKTS 3U
401#define QIBPORTCNTR_PSXMITWAIT 4U
402#define QIBPORTCNTR_SENDSTALL 5U
403
404#define QIBPORTCNTR_PKTRCV 6U
405#define QIBPORTCNTR_PSRCVDATA 7U
406#define QIBPORTCNTR_PSRCVPKTS 8U
407#define QIBPORTCNTR_RCVEBP 9U
408#define QIBPORTCNTR_RCVOVFL 10U
409#define QIBPORTCNTR_WORDRCV 11U
410
411#define QIBPORTCNTR_RXLOCALPHYERR 12U
412#define QIBPORTCNTR_RXVLERR 13U
413#define QIBPORTCNTR_ERRICRC 14U
414#define QIBPORTCNTR_ERRVCRC 15U
415#define QIBPORTCNTR_ERRLPCRC 16U
416#define QIBPORTCNTR_BADFORMAT 17U
417#define QIBPORTCNTR_ERR_RLEN 18U
418#define QIBPORTCNTR_IBSYMBOLERR 19U
419#define QIBPORTCNTR_INVALIDRLEN 20U
420#define QIBPORTCNTR_UNSUPVL 21U
421#define QIBPORTCNTR_EXCESSBUFOVFL 22U
422#define QIBPORTCNTR_ERRLINK 23U
423#define QIBPORTCNTR_IBLINKDOWN 24U
424#define QIBPORTCNTR_IBLINKERRRECOV 25U
425#define QIBPORTCNTR_LLI 26U
426
427#define QIBPORTCNTR_RXDROPPKT 27U
428#define QIBPORTCNTR_VL15PKTDROP 28U
429#define QIBPORTCNTR_ERRPKEY 29U
430#define QIBPORTCNTR_KHDROVFL 30U
431
432#define QIBPORTCNTR_PSINTERVAL 31U
433#define QIBPORTCNTR_PSSTART 32U
434#define QIBPORTCNTR_PSSTAT 33U
435
436
437#define ACTIVITY_TIMER 5
438
439#define MAX_NAME_SIZE 64
440
441#ifdef CONFIG_INFINIBAND_QIB_DCA
442struct qib_irq_notify;
443#endif
444
/* Per-MSI-X-vector bookkeeping: irq cookie, handler arg, and affinity. */
struct qib_msix_entry {
	struct msix_entry msix;	/* vector/entry as handed to the PCI core */
	void *arg;		/* argument passed to the irq handler */
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int dca;		/* nonzero if DCA is used for this vector */
	int rcv;		/* nonzero if this is a receive-path vector */
	struct qib_irq_notify *notifier; /* affinity-change notifier */
#endif
	char name[MAX_NAME_SIZE]; /* irq name shown in /proc/interrupts */
	cpumask_var_t mask;	/* requested CPU affinity */
};
456
457
458
459
460
461
462struct qib_chip_specific;
463struct qib_chipport_specific;
464
/*
 * States of the per-port send-DMA state machine.  The numeric prefix
 * (s00..s99) gives the conventional ordering; s99 is the only state in
 * which descriptors are submitted to hardware (see __qib_sdma_running).
 */
enum qib_sdma_states {
	qib_sdma_state_s00_hw_down,
	qib_sdma_state_s10_hw_start_up_wait,
	qib_sdma_state_s20_idle,
	qib_sdma_state_s30_sw_clean_up_wait,
	qib_sdma_state_s40_hw_clean_up_wait,
	qib_sdma_state_s50_hw_halt_wait,
	qib_sdma_state_s99_running,
};
474
/*
 * Events fed to the send-DMA state machine via
 * qib_sdma_process_event().  e7220/e7322 are chip-specific
 * error-halt events.
 */
enum qib_sdma_events {
	qib_sdma_event_e00_go_hw_down,
	qib_sdma_event_e10_go_hw_start,
	qib_sdma_event_e20_hw_started,
	qib_sdma_event_e30_go_running,
	qib_sdma_event_e40_sw_cleaned,
	qib_sdma_event_e50_hw_cleaned,
	qib_sdma_event_e60_hw_halted,
	qib_sdma_event_e70_go_idle,
	qib_sdma_event_e7220_err_halted,
	qib_sdma_event_e7322_err_halted,
	qib_sdma_event_e90_timer_tick,
};
488
489extern char *qib_sdma_state_names[];
490extern char *qib_sdma_event_names[];
491
/*
 * Actions to apply when entering an SDMA state: which chip operations
 * to issue and how to update the software go_s99_running flag.
 */
struct sdma_set_state_action {
	unsigned op_enable:1;		/* enable the SDMA engine */
	unsigned op_intenable:1;	/* enable SDMA interrupts */
	unsigned op_halt:1;		/* halt the SDMA engine */
	unsigned op_drain:1;		/* drain outstanding descriptors */
	unsigned go_s99_running_tofalse:1; /* clear go_s99_running on entry */
	unsigned go_s99_running_totrue:1;  /* set go_s99_running on entry */
};
500
/* Software state of the per-port send-DMA engine. */
struct qib_sdma_state {
	struct kref kref;		/* users of the current state */
	struct completion comp;		/* signaled when kref drops to zero */
	enum qib_sdma_states current_state;
	struct sdma_set_state_action *set_state_action; /* per-state action table */
	unsigned current_op;		/* QIB_SDMA_SENDCTRL_OP_* bits in effect */
	unsigned go_s99_running;	/* nonzero: proceed to s99 when possible */
	unsigned first_sendbuf;		/* send-buffer range owned by SDMA */
	unsigned last_sendbuf;
	/* debugging: previous state/op and last event processed */
	enum qib_sdma_states previous_state;
	unsigned previous_op;
	enum qib_sdma_events last_event;
};
515
/*
 * Congestion-statistics sampling state: a timer plus cached hardware
 * counter values so deltas can be computed per sample interval.
 */
struct xmit_wait {
	struct timer_list timer;
	u64 counter;
	u8 flags;
	struct cache {
		u64 psxmitdata;
		u64 psrcvdata;
		u64 psxmitpkts;
		u64 psrcvpkts;
		u64 psxmitwait;
	} counter_cache;	/* last-read values of the PS* port counters */
};
528
529
530
531
532
533
534
/*
 * Per-IB-port state.  A device has dd->num_pports of these, hanging
 * off qib_devdata.pport.  Contains the verbs port state, link state,
 * send-DMA engine state, and congestion-control shadows.
 */
struct qib_pportdata {
	struct qib_ibport ibport_data;	/* verbs port state; see ppd_from_ibp() */

	struct qib_devdata *dd;		/* back-pointer to the owning device */
	/* NOTE(review): spelled "chippport" here but the forward
	 * declaration above says "chipport" — confirm which is intended */
	struct qib_chippport_specific *cpspec; /* chip-specific per-port state */
	/* sysfs objects for this port */
	struct kobject pport_kobj;
	struct kobject pport_cc_kobj;
	struct kobject sl2vl_kobj;
	struct kobject diagc_kobj;

	/* GUID of this port */
	__be64 guid;

	/* QIBL_* link-state flags (defined near end of file) */
	u32 lflags;
	/* link state(s) a waiter is sleeping for; see state_wait */
	u32 state_wanted;
	spinlock_t lflags_lock;		/* protects lflags updates */

	/* reference counts on each of the 4 pkey table slots */
	atomic_t pkeyrefs[4];

	/* points at this port's status words in the shared status page */
	u64 *statusp;

	/* send-DMA engine state */
	struct qib_sdma_desc *sdma_descq;	/* descriptor ring (kernel vaddr) */
	struct workqueue_struct *qib_wq;
	struct qib_sdma_state sdma_state;
	dma_addr_t sdma_descq_phys;		/* DMA address of the ring */
	volatile __le64 *sdma_head_dma;		/* DMA'ed copy of the hw head index */
	dma_addr_t sdma_head_phys;
	u16 sdma_descq_cnt;			/* ring size in descriptors */

	/* read/write using sdma_lock */
	spinlock_t sdma_lock ____cacheline_aligned_in_smp;
	struct list_head sdma_activelist;	/* in-flight qib_sdma_txreq's */
	struct list_head sdma_userpending;	/* user SDMA awaiting submission */
	u64 sdma_descq_added;			/* descriptors ever queued */
	u64 sdma_descq_removed;			/* descriptors ever retired */
	u16 sdma_descq_tail;
	u16 sdma_descq_head;
	u8 sdma_generation;
	u8 sdma_intrequest;

	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;

	/* waiters for a link-state transition; see state_wanted */
	wait_queue_head_t state_wait;

	/* head-of-line blocking state and its timer */
	unsigned hol_state;
	struct timer_list hol_timer;

	/* last observed IBC status, for change detection */
	u64 lastibcstat;

	/* software shadows of this port's rcvctrl/sendctrl registers */
	unsigned long p_rcvctrl;
	unsigned long p_sendctrl;

	/* current IB MTU in bytes */
	u32 ibmtu;

	/* current maximum IB packet length in bytes (headers included) */
	u32 ibmaxlen;

	/* ibmaxlen at initialization time */
	u32 init_ibmaxlen;

	u16 lid;		/* local identifier */

	u16 pkeys[4];		/* partition key table */

	u8 lmc;			/* LID mask control */
	/* link width/speed capabilities and current settings */
	u8 link_width_supported;
	u8 link_speed_supported;
	u8 link_width_enabled;
	u8 link_speed_enabled;
	u8 link_width_active;
	u8 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;

	u8 rx_pol_inv;		/* receive polarity inversion */

	u8 hw_pidx;		/* hardware port index (0-based) */
	u8 port;		/* IB port number (1-based) */

	u8 delay_mult;

	/* LED-override (identify) support */
	u8 led_override;
	u16 led_override_timeoff;
	u8 led_override_vals[2];
	u8 led_override_phase;
	atomic_t led_override_timer_active;

	struct timer_list led_override_timer;
	struct xmit_wait cong_stats;	/* congestion statistics sampling */
	struct timer_list symerr_clear_timer;

	/* protects the congestion-control shadows below */
	spinlock_t cc_shadow_lock
		____cacheline_aligned_in_smp;

	/* shadow of the CC table, for SA queries */
	struct cc_table_shadow *ccti_entries_shadow;

	/* shadow of the CC congestion setting, for SA queries */
	struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;

	/* congestion-control table entries */
	struct ib_cc_table_entry_shadow *ccti_entries;

	/* per-SL congestion entries */
	struct ib_cc_congestion_entry_shadow *congestion_entries;

	/* number of CC table entries this port supports */
	u16 cc_supported_table_entries;

	/* total number of CC table entries currently configured */
	u16 total_cct_entry;

	/* bitmask of SLs with congestion control enabled */
	u16 cc_sl_control_map;

	/* maximum valid CCTI */
	u16 ccti_limit;

	/* maximum CC table entries (per hardware) */
	u8 cc_max_table_entries;
};
697
698
699
700
701
702
703
704
705struct diag_observer;
706
/*
 * Diagnostic observer: a hook invoked on diag register accesses that
 * fall within [bottom, top].  Registered via qib_register_observer().
 */
typedef int (*diag_hook) (struct qib_devdata *dd,
	const struct diag_observer *op,
	u32 offs, u64 *data, u64 mask, int only_32);

struct diag_observer {
	diag_hook hook;		/* called on matching accesses */
	u32 bottom;		/* first register offset of interest */
	u32 top;		/* last register offset of interest */
};
716
717extern int qib_register_observer(struct qib_devdata *dd,
718 const struct diag_observer *op);
719
720
721struct diag_observer_list_elt;
722
723
724
725
726
727
/*
 * Per-device (HCA) state.  One instance per discovered QLogic IB chip;
 * per-port state hangs off ->pport.  The f_* function pointers form a
 * chip-specific ops vector filled in by qib_init_iba{6120,7220,7322}_funcs().
 * dd_from_dev() recovers this struct from the embedded verbs_dev.
 */
struct qib_devdata {
	struct qib_ibdev verbs_dev;	/* verbs device state (see dd_from_dev) */
	struct list_head list;		/* linkage on qib_dev_list */

	/* PCI and character-device handles */
	struct pci_dev *pcidev;
	struct cdev *user_cdev;
	struct cdev *diag_cdev;
	struct device *user_device;
	struct device *diag_device;

	/* mapped chip register space: base and end of kernel registers */
	u64 __iomem *kregbase;
	u64 __iomem *kregend;
	/* physical (bus) address of the register space */
	resource_size_t physaddr;
	/* receive contexts, indexed by context number; cfgctxts entries */
	struct qib_ctxtdata **rcd;

	/* array of num_pports port structures, and chip-private state */
	struct qib_pportdata *pport;
	struct qib_chip_specific *cspec;

	/* mapped PIO send buffer regions */
	void __iomem *pio2kbase;	/* 2KB send buffers */
	void __iomem *pio4kbase;	/* 4KB send buffers */
	void __iomem *piobase;		/* start of all send buffers */
	u64 __iomem *userbase;		/* user register space, if mapped */
	void __iomem *piovl15base;	/* VL15 send buffers, if separate */

	/* DMA'ed shadow of the PIO-buffer-available registers */
	volatile __le64 *pioavailregs_dma;
	dma_addr_t pioavailregs_phys;

	/*
	 * Chip-specific operations vector.  All chip variants provide
	 * every entry; callers do not need to NULL-check.
	 */
	int (*f_intr_fallback)(struct qib_devdata *);
	/* full chip reset */
	int (*f_reset)(struct qib_devdata *);
	void (*f_quiet_serdes)(struct qib_pportdata *);
	int (*f_bringup_serdes)(struct qib_pportdata *);
	int (*f_early_init)(struct qib_devdata *);
	void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
	void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
				u32, unsigned long);
	void (*f_cleanup)(struct qib_devdata *);
	void (*f_setextled)(struct qib_pportdata *, u32);
	/* fill in chip-dependent fields of the user base info */
	int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
	/* free irq */
	void (*f_free_irq)(struct qib_devdata *);
	struct qib_message_header *(*f_get_msgheader)
					(struct qib_devdata *, __le32 *);
	void (*f_config_ctxts)(struct qib_devdata *);
	/* get/set QIB_IB_CFG_* values (see constants above) */
	int (*f_get_ib_cfg)(struct qib_pportdata *, int);
	int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
	int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
	int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
	int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
	u32 (*f_iblink_state)(u64);
	u8 (*f_ibphys_portstate)(u64);
	void (*f_xgxs_reset)(struct qib_pportdata *);
	/* per-chip handling of IB link up/down */
	int (*f_ib_updown)(struct qib_pportdata *, int, u64);
	u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
	/* GPIO modify: set bits in @out selected by @dir, under @mask */
	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
		u32 mask);
	/* enable (wen=1) or disable writes to the EEPROM */
	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);

	/*
	 * modify rcvctrl shadow[s] and write to hardware;
	 * op is one or more QIB_RCVCTRL_* flags, ctxt selects the context
	 * (or all, per chip convention)
	 */
	void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
		int ctxt);
	/* likewise for sendctrl, with QIB_SENDCTRL_* op bits */
	void (*f_sendctrl)(struct qib_pportdata *, u32 op);
	void (*f_set_intr_state)(struct qib_devdata *, u32);
	void (*f_set_armlaunch)(struct qib_devdata *, u32);
	void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
	int (*f_late_initreg)(struct qib_devdata *);
	/* send-DMA chip hooks */
	int (*f_init_sdma_regs)(struct qib_pportdata *);
	u16 (*f_sdma_gethead)(struct qib_pportdata *);
	int (*f_sdma_busy)(struct qib_pportdata *);
	void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
	void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
	void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
	void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
	void (*f_sdma_init_early)(struct qib_pportdata *);
	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
	/* read a QIBPORTCNTR_* counter */
	u64 (*f_portcntr)(struct qib_pportdata *, u32);
	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
		u64 **);
	u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
		char **, u64 **);
	u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
	void (*f_initvl15_bufs)(struct qib_devdata *);
	void (*f_init_ctxt)(struct qib_ctxtdata *);
	/* change send-buffer integrity checking; TXCHK_CHG_TYPE_* arg */
	void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
		struct qib_ctxtdata *);
	void (*f_writescratch)(struct qib_devdata *, u32);
	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
#endif

	char *boardname;	/* human-readable board name */

	/* template for writes to the TID (expected receive) registers */
	u64 tidtemplate;
	/* value written to invalidate a TID entry */
	u64 tidinvalid;

	/* number of registers used by pioavail */
	u32 pioavregs;
	/* QIB_* device flags (defined near end of file) */
	u32 flags;
	/* last PIO buffer reserved for kernel contexts */
	u32 lastctxt_piobuf;

	/* interrupt counter: snapshot base plus per-cpu counters */
	u64 z_int_counter;
	u64 __percpu *int_counter;

	/* PIO buffers per user context, and contexts getting an extra one */
	u32 pbufsctxt;
	u32 ctxts_extrabuf;

	/* number of contexts configured (kernel + user) */
	u32 cfgctxts;

	/* number of user contexts currently available */
	u32 freectxts;

	/* nonzero when the pioavail shadow must be refreshed from chip */
	u32 upd_pio_shadow;

	/* statistics on packets handled per interrupt call */
	u32 maxpkts_call;
	u32 avgpkts_call;
	u64 nopiobufs;		/* times no PIO buffer was available */

	/* PCI vendor/device IDs */
	u16 vendorid;
	u16 deviceid;

	/* write-combining mapping bookkeeping (x86 MTRR/PAT) */
	int wc_cookie;
	unsigned long wc_base;
	unsigned long wc_len;

	/* shadow of pinned user pages for expected receives */
	struct page **pageshadow;
	/* and their DMA addresses */
	dma_addr_t *physshadow;
	u64 __iomem *egrtidbase;	/* eager TID register base */
	spinlock_t sendctrl_lock;	/* protects sendctrl shadow + register */
	/* protects rcd[] and user-context state */
	spinlock_t uctxt_lock;

	/* device status words shared with userspace (freeze etc.) */
	u64 *devstatusp;
	char *freezemsg;	/* message describing a chip freeze */
	u32 freezelen;		/* allocated size of freezemsg */
	/* timer for periodic statistics gathering */
	struct timer_list stats_timer;

	/* timer to verify interrupts are getting through */
	struct timer_list intrchk_timer;
	unsigned long ureg_align;	/* user register alignment */

	/* protects the PIO-availability shadows below */
	spinlock_t pioavail_lock;

	/* last PIO buffer allocated, for round-robin fairness */
	u32 last_pio;

	/* minimum PIO buffer number reserved for the kernel */
	u32 min_kernel_pio;

	/*
	 * Shadows of chip PIO-buffer state: which buffers are in use,
	 * which belong to the kernel, which need disarming, and which
	 * are currently being written.
	 */
	unsigned long pioavailshadow[6];
	unsigned long pioavailkernel[6];
	unsigned long pio_need_disarm[3];
	unsigned long pio_writing[3];

	/* chip revision register value; majrev/minrev are decoded below */
	u64 revision;
	/* base GUID from which port GUIDs are derived */
	__be64 base_guid;

	/* chip offset of PIO buffers, and of the first 2K buffer */
	u64 piobufbase;
	u32 pio2k_bufbase;

	/* number of GUIDs in the flash for this interface */
	u32 nguid;

	/* device-wide shadows of the rcvctrl/sendctrl registers */
	unsigned long rcvctrl;
	unsigned long sendctrl;

	/* receive header queue geometry (counts/sizes in dwords) */
	u32 rcvhdrcnt;
	u32 rcvhdrsize;
	u32 rcvhdrentsize;
	/* number of receive contexts the chip supports */
	u32 ctxtcnt;
	/* page alignment required by the chip */
	u32 palign;

	/* PIO buffer counts and sizes (2K and 4K pools) */
	u32 piobcnt2k;
	u32 piosize2k;
	u32 piosize2kmax_dwords;
	u32 piobcnt4k;
	u32 piosize4k;

	/* chip offsets of eager/expected TID arrays and user registers */
	u32 rcvegrbase;
	u32 rcvtidbase;
	u32 rcvtidcnt;
	u32 uregbase;
	/* shadow of the chip control register */
	u32 control;

	/* 4KB-alignment requirement for PIO buffers */
	u32 align4k;

	/* eager receive buffer size and log2 thereof */
	u16 rcvegrbufsize;
	u16 rcvegrbufsize_shift;

	/* local bus (PCIe) width and speed, for diagnostics */
	u32 lbus_width;
	u32 lbus_speed;
	int unit;		/* driver unit number (minor-device index) */

	/* saved MSI state, restored across chip reset */
	u32 msi_lo;
	u32 msi_hi;
	u16 msi_data;
	/* saved BARs, restored across chip reset */
	u32 pcibar0;
	u32 pcibar1;
	u64 rhdrhead_intr_off;	/* interrupt-on-head-write bit for rcvhdrhead */

	/* board identification from flash */
	u8 serial[16];
	u8 boardversion[96];
	u8 lbus_info[32];

	/* chip major/minor revision, decoded from ->revision */
	u8 majrev;
	u8 minrev;

	/* number of IB ports on this chip (1 or 2) */
	u8 num_pports;

	/* context number of the first user context */
	u8 first_user_ctxt;
	u8 n_krcv_queues;	/* kernel receive queues per port */
	u8 qpn_mask;		/* mask applied to QPNs for context dispatch */
	u8 skip_kctxt_mask;	/* kernel contexts to skip when mapping QPNs */

	/* offset (in u32s) of the RHF within a header queue entry */
	u16 rhf_offset;

	/* GPIO pins and device address for the TWSI (i2c-like) EEPROM */
	u8 gpio_sda_num;
	u8 gpio_scl_num;
	u8 twsi_eeprom_dev;
	u8 board_atten;

	/* protects the EEPROM error-log state */
	spinlock_t eep_st_lock;
	/* serializes TWSI/EEPROM transactions */
	struct mutex eep_lock;
	uint64_t traffic_wds;	/* accumulated traffic words, for activity check */

	/* which error bits count toward each EEPROM log counter */
	struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
	struct qib_diag_client *diag_client;	/* open diag clients */
	spinlock_t qib_diag_trans_lock;		/* protects diag transactions */
	struct diag_observer_list_elt *diag_observer_list;

	/* nonzero if the chip supports the PSXmitWait counter */
	u8 psxmitwait_supported;
	/* PSXmitWait sampling rate in ticks */
	u16 psxmitwait_check_rate;
	/* deferred error handling */
	struct tasklet_struct error_tasklet;

	int assigned_node_id;	/* NUMA node chosen for this device */
};
1101
1102
1103#define QIB_HOL_UP 0
1104#define QIB_HOL_INIT 1
1105
1106#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1107#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1108#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1109#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1110#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1111
1112
1113#define TXCHK_CHG_TYPE_DIS1 3
1114#define TXCHK_CHG_TYPE_ENAB1 2
1115#define TXCHK_CHG_TYPE_KERN 1
1116#define TXCHK_CHG_TYPE_USER 0
1117
1118#define QIB_CHASE_TIME msecs_to_jiffies(145)
1119#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1120
1121
/*
 * Private data attached to each open of the user character device;
 * accessed via the *_fp() macros below.
 */
struct qib_filedata {
	struct qib_ctxtdata *rcd;	/* context this open is bound to */
	unsigned subctxt;		/* subcontext number within rcd */
	unsigned tidcursor;		/* per-open expected-TID cursor */
	struct qib_user_sdma_queue *pq;	/* per-open user SDMA queue */
	int rec_cpu_num;		/* CPU recorded for affinity (-1 if none) */
};
1129
1130extern struct list_head qib_dev_list;
1131extern spinlock_t qib_devs_lock;
1132extern struct qib_devdata *qib_lookup(int unit);
1133extern u32 qib_cpulist_count;
1134extern unsigned long *qib_cpulist;
1135extern unsigned qib_cc_table_size;
1136
1137int qib_init(struct qib_devdata *, int);
1138int init_chip_wc_pat(struct qib_devdata *dd, u32);
1139int qib_enable_wc(struct qib_devdata *dd);
1140void qib_disable_wc(struct qib_devdata *dd);
1141int qib_count_units(int *npresentp, int *nupp);
1142int qib_count_active_units(void);
1143
1144int qib_cdev_init(int minor, const char *name,
1145 const struct file_operations *fops,
1146 struct cdev **cdevp, struct device **devp);
1147void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
1148int qib_dev_init(void);
1149void qib_dev_cleanup(void);
1150
1151int qib_diag_add(struct qib_devdata *);
1152void qib_diag_remove(struct qib_devdata *);
1153void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
1154void qib_sdma_update_tail(struct qib_pportdata *, u16);
1155
1156int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
1157void qib_bad_intrstatus(struct qib_devdata *);
1158void qib_handle_urcv(struct qib_devdata *, u64);
1159
1160
1161void qib_chip_cleanup(struct qib_devdata *);
1162
1163void qib_chip_done(void);
1164
1165
1166int qib_unordered_wc(void);
1167void qib_pio_copy(void __iomem *to, const void *from, size_t count);
1168
1169void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
1170int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
1171void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
1172void qib_cancel_sends(struct qib_pportdata *);
1173
1174int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
1175int qib_setup_eagerbufs(struct qib_ctxtdata *);
1176void qib_set_ctxtcnt(struct qib_devdata *);
1177int qib_create_ctxts(struct qib_devdata *dd);
1178struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
1179int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
1180void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
1181
1182u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
1183int qib_reset_device(int);
1184int qib_wait_linkstate(struct qib_pportdata *, u32, int);
1185int qib_set_linkstate(struct qib_pportdata *, u8);
1186int qib_set_mtu(struct qib_pportdata *, u16);
1187int qib_set_lid(struct qib_pportdata *, u32, u8);
1188void qib_hol_down(struct qib_pportdata *);
1189void qib_hol_init(struct qib_pportdata *);
1190void qib_hol_up(struct qib_pportdata *);
1191void qib_hol_event(unsigned long);
1192void qib_disable_after_error(struct qib_devdata *);
1193int qib_set_uevent_bits(struct qib_pportdata *, const int);
1194
1195
/* Accessors for the qib_filedata hanging off a struct file. */
#define ctxt_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->rcd)
#define subctxt_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->subctxt)
#define tidcursor_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->tidcursor)
#define user_sdma_queue_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->pq)
1204
/* Return the device a port belongs to. */
static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
{
	return ppd->dd;
}
1209
/* Map an embedded verbs device back to its containing qib_devdata. */
static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
{
	return container_of(dev, struct qib_devdata, verbs_dev);
}
1214
/* Map a generic ib_device to its qib_devdata (via the verbs device). */
static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}
1219
/* Map an embedded qib_ibport back to its containing port structure. */
static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
{
	return container_of(ibp, struct qib_pportdata, ibport_data);
}
1224
1225static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
1226{
1227 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1228 unsigned pidx = port - 1;
1229
1230 WARN_ON(pidx >= dd->num_pports);
1231 return &dd->pport[pidx].ibport_data;
1232}
1233
1234
1235
1236
1237#define QIB_HAS_LINK_LATENCY 0x1
1238#define QIB_INITTED 0x2
1239#define QIB_DOING_RESET 0x4
1240#define QIB_PRESENT 0x8
1241#define QIB_PIO_FLUSH_WC 0x10
1242#define QIB_HAS_THRESH_UPDATE 0x40
1243#define QIB_HAS_SDMA_TIMEOUT 0x80
1244#define QIB_USE_SPCL_TRIG 0x100
1245#define QIB_NODMA_RTAIL 0x200
1246#define QIB_HAS_INTX 0x800
1247#define QIB_HAS_SEND_DMA 0x1000
1248#define QIB_HAS_VLSUPP 0x2000
1249#define QIB_HAS_HDRSUPP 0x4000
1250#define QIB_BADINTR 0x8000
1251#define QIB_DCA_ENABLED 0x10000
1252#define QIB_HAS_QSFP 0x20000
1253
1254
1255
1256
1257#define QIBL_LINKV 0x1
1258#define QIBL_LINKDOWN 0x8
1259#define QIBL_LINKINIT 0x10
1260#define QIBL_LINKARMED 0x20
1261#define QIBL_LINKACTIVE 0x40
1262
1263#define QIBL_IB_AUTONEG_INPROG 0x1000
1264#define QIBL_IB_AUTONEG_FAILED 0x2000
1265#define QIBL_IB_LINK_DISABLED 0x4000
1266
1267#define QIBL_IB_FORCE_NOTIFY 0x8000
1268
1269
1270#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1271
1272
1273
1274
1275#define QIB_CTXT_WAITING_RCV 2
1276
1277#define QIB_CTXT_MASTER_UNINIT 4
1278
1279#define QIB_CTXT_WAITING_URG 5
1280
1281
1282void qib_free_data(struct qib_ctxtdata *dd);
1283void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
1284 u32, struct qib_ctxtdata *);
1285struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
1286 const struct pci_device_id *);
1287struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
1288 const struct pci_device_id *);
1289struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
1290 const struct pci_device_id *);
1291void qib_free_devdata(struct qib_devdata *);
1292struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
1293
1294#define QIB_TWSI_NO_DEV 0xFF
1295
1296int qib_twsi_reset(struct qib_devdata *dd);
1297int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1298 int len);
1299int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1300 const void *buffer, int len);
1301void qib_get_eeprom_info(struct qib_devdata *);
1302#define qib_inc_eeprom_err(dd, eidx, incr)
1303void qib_dump_lookup_output_queue(struct qib_devdata *);
1304void qib_force_pio_avail_update(struct qib_devdata *);
1305void qib_clear_symerror_on_linkup(unsigned long opaque);
1306
1307
1308
1309
1310
1311
1312#define QIB_LED_PHYS 1
1313#define QIB_LED_LOG 2
1314void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1315
1316
1317int qib_setup_sdma(struct qib_pportdata *);
1318void qib_teardown_sdma(struct qib_pportdata *);
1319void __qib_sdma_intr(struct qib_pportdata *);
1320void qib_sdma_intr(struct qib_pportdata *);
1321void qib_user_sdma_send_desc(struct qib_pportdata *dd,
1322 struct list_head *pktlist);
1323int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
1324 u32, struct qib_verbs_txreq *);
1325
1326int qib_sdma_make_progress(struct qib_pportdata *dd);
1327
/* True when the SDMA descriptor ring has no outstanding descriptors. */
static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
{
	return ppd->sdma_descq_added == ppd->sdma_descq_removed;
}
1332
1333
1334static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
1335{
1336 return ppd->sdma_descq_cnt -
1337 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
1338}
1339
/*
 * Lockless check that the SDMA engine is in the running state;
 * qib_sdma_running() is the locked variant.
 */
static inline int __qib_sdma_running(struct qib_pportdata *ppd)
{
	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
}
1344int qib_sdma_running(struct qib_pportdata *);
1345void dump_sdma_state(struct qib_pportdata *ppd);
1346void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1347void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1348
1349
1350
1351
1352#define QIB_DFLT_RCVHDRSIZE 9
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365#define QIB_RCVHDR_ENTSIZE 32
1366
1367int qib_get_user_pages(unsigned long, size_t, struct page **);
1368void qib_release_user_pages(struct page **, size_t);
1369int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
1370int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
1371u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
1372void qib_sendbuf_done(struct qib_devdata *, unsigned);
1373
/* Zero the DMA'ed receive-header-queue tail word for a context. */
static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
}
1378
/*
 * Read the hardware-DMA'ed receive-header-queue tail for a context.
 * The volatile access forces a fresh read of the memory the chip
 * updates; the value is little-endian in memory.
 */
static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	return (u32) le64_to_cpu(
		*((volatile __le64 *)rcd->rcvhdrtail_kvaddr));
}
1388
1389static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
1390{
1391 const struct qib_devdata *dd = rcd->dd;
1392 u32 hdrqtail;
1393
1394 if (dd->flags & QIB_NODMA_RTAIL) {
1395 __le32 *rhf_addr;
1396 u32 seq;
1397
1398 rhf_addr = (__le32 *) rcd->rcvhdrq +
1399 rcd->head + dd->rhf_offset;
1400 seq = qib_hdrget_seq(rhf_addr);
1401 hdrqtail = rcd->head;
1402 if (seq == rcd->seq_cnt)
1403 hdrqtail++;
1404 } else
1405 hdrqtail = qib_get_rcvhdrtail(rcd);
1406
1407 return hdrqtail;
1408}
1409
1410
1411
1412
1413
1414extern const char ib_qib_version[];
1415
1416int qib_device_create(struct qib_devdata *);
1417void qib_device_remove(struct qib_devdata *);
1418
1419int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
1420 struct kobject *kobj);
1421int qib_verbs_register_sysfs(struct qib_devdata *);
1422void qib_verbs_unregister_sysfs(struct qib_devdata *);
1423
1424extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
1425
1426int __init qib_init_qibfs(void);
1427int __exit qib_exit_qibfs(void);
1428
1429int qibfs_add(struct qib_devdata *);
1430int qibfs_remove(struct qib_devdata *);
1431
1432int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
1433int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
1434 const struct pci_device_id *);
1435void qib_pcie_ddcleanup(struct qib_devdata *);
1436int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
1437int qib_reinit_intr(struct qib_devdata *);
1438void qib_enable_intx(struct pci_dev *);
1439void qib_nomsi(struct qib_devdata *);
1440void qib_nomsix(struct qib_devdata *);
1441void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
1442void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
1443
1444u64 qib_int_counter(struct qib_devdata *);
1445
1446u64 qib_sps_ints(void);
1447
1448
1449
1450
1451dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
1452 size_t, int);
1453const char *qib_get_unit_name(int unit);
1454const char *qib_get_card_name(struct rvt_dev_info *rdi);
1455struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
1456
1457
1458
1459
1460
/*
 * qib_flush_wc - flush CPU write-combining buffers
 *
 * Forces previously issued PIO stores to write-combined mappings to
 * become visible to the chip before subsequent stores.  On x86_64 an
 * sfence suffices; elsewhere fall back to the generic write barrier.
 */
static inline void qib_flush_wc(void)
{
#if defined(CONFIG_X86_64)
	asm volatile("sfence" : : : "memory");
#else
	wmb(); /* no reorder around byte writes */
#endif
}
1469
1470
1471extern unsigned qib_ibmtu;
1472extern ushort qib_cfgctxts;
1473extern ushort qib_num_cfg_vls;
1474extern ushort qib_mini_init;
1475extern unsigned qib_n_krcv_queues;
1476extern unsigned qib_sdma_fetch_arb;
1477extern unsigned qib_compat_ddr_negotiate;
1478extern int qib_special_trigger;
1479extern unsigned qib_numa_aware;
1480
1481extern struct mutex qib_mutex;
1482
1483
1484#define STATUS_TIMEOUT 60
1485
1486#define QIB_DRV_NAME "ib_qib"
1487#define QIB_USER_MINOR_BASE 0
1488#define QIB_TRACE_MINOR 127
1489#define QIB_DIAGPKT_MINOR 128
1490#define QIB_DIAG_MINOR_BASE 129
1491#define QIB_NMINORS 255
1492
1493#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1494#define PCI_VENDOR_ID_QLOGIC 0x1077
1495#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1496#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1497#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
/*
 * Message conventions: qib_early_err is for use before the qib_devdata
 * is usable; the other variants prefix messages with the unit name
 * (and, for porterr, unit:port).
 */
#define qib_early_err(dev, fmt, ...) \
	dev_err(dev, fmt, ##__VA_ARGS__)

#define qib_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)

#define qib_dev_warn(dd, fmt, ...) \
	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)

#define qib_dev_porterr(dd, port, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
		qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
		##__VA_ARGS__)

#define qib_devinfo(pcidev, fmt, ...) \
	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
1526
1527
1528
1529
/*
 * Table entry mapping a hardware-error bit mask to a human-readable
 * message; consumed by qib_format_hwerrors().
 */
struct qib_hwerror_msgs {
	u64 mask;		/* hwerr bits this entry describes */
	const char *msg;	/* message to emit when any bit matches */
	size_t sz;		/* NOTE(review): message buffer size — confirm use */
};

/* Initializer for a qib_hwerror_msgs entry (sz left zero). */
#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1537
1538
1539void qib_format_hwerrors(u64 hwerrs,
1540 const struct qib_hwerror_msgs *hwerrmsgs,
1541 size_t nhwerrmsgs, char *msg, size_t lmsg);
1542
1543void qib_stop_send_queue(struct rvt_qp *qp);
1544void qib_quiesce_qp(struct rvt_qp *qp);
1545void qib_flush_qp_waiters(struct rvt_qp *qp);
1546int qib_mtu_to_path_mtu(u32 mtu);
1547u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
1548void qib_notify_error_qp(struct rvt_qp *qp);
1549int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1550 struct ib_qp_attr *attr);
1551
1552#endif
1553