1#ifndef _QIB_KERNEL_H
2#define _QIB_KERNEL_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/list.h>
47#include <linux/scatterlist.h>
48#include <linux/slab.h>
49#include <linux/io.h>
50#include <linux/fs.h>
51#include <linux/completion.h>
52#include <linux/kref.h>
53#include <linux/sched.h>
54#include <linux/kthread.h>
55#include <rdma/rdma_vt.h>
56
57#include "qib_common.h"
58#include "qib_verbs.h"
59
60
61#define QIB_CHIP_VERS_MAJ 2U
62
63
64#define QIB_CHIP_VERS_MIN 0U
65
66
67#define QIB_OUI 0x001175
68#define QIB_OUI_LSB 40
69
70
71
72
73
74
75
76
77
/*
 * Device-wide statistics, kept in the qib_stats global declared below.
 * The historical "sps_" prefix ("software polled stats") is retained.
 */
struct qlogic_ib_stats {
	__u64 sps_ints;		/* interrupts handled */
	__u64 sps_errints;	/* error-type interrupts */
	__u64 sps_txerrs;	/* transmit-side packet errors */
	__u64 sps_rcverrs;	/* receive-side (non-CRC) packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported */
	__u64 sps_nopiobufs;	/* times no PIO buffer was available */
	__u64 sps_ctxts;	/* contexts currently open */
	__u64 sps_lenerrs;	/* packet length mismatch errors */
	__u64 sps_buffull;	/* receive eager-buffer-full events */
	__u64 sps_hdrfull;	/* receive header-queue-full events */
};
90
91extern struct qlogic_ib_stats qib_stats;
92extern const struct pci_error_handlers qib_pci_err_handler;
93
94#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
95
96
97
98
99
100
101#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
102
103
104
105
106
107
108
109#define QIB_EEP_LOG_CNT (4)
/*
 * Per-class masks selecting which error and hardware-error bits are
 * logged to the EEPROM; one entry per QIB_EEP_LOG_CNT class.
 */
struct qib_eep_log_mask {
	u64 errs_to_log;	/* software error bits to log */
	u64 hwerrs_to_log;	/* hardware error bits to log */
};
114
115
116
117
118
119#ifdef CONFIG_DEBUG_FS
120struct qib_opcode_stats_perctx;
121#endif
122
/*
 * Per-context (receive context) state: receive queues, eager buffers,
 * subcontext sharing info, and the PIO buffers assigned to the context.
 * One of these exists for each kernel or user receive context.
 */
struct qib_ctxtdata {
	/* array of chunks of eager receive buffers (kernel virtual) */
	void **rcvegrbuf;
	/* DMA addresses of the eager buffer chunks above */
	dma_addr_t *rcvegrbuf_phys;
	/* receive header queue base (kernel virtual) */
	void *rcvhdrq;
	/* kernel virtual address where hdrqtail is DMA'd by the chip */
	void *rcvhdrtail_kvaddr;
	/* page list for expected-TID setup (user contexts) */
	void *tid_pg_list;
	/* bitmap of events posted to user contexts; one bit per event */
	unsigned long *user_event_mask;
	/* waitqueue for processes sleeping on receive/PIO availability */
	wait_queue_head_t wait;
	/* DMA address of first eager buffer */
	dma_addr_t rcvegr_phys;
	/* DMA address of the receive header queue */
	dma_addr_t rcvhdrq_phys;
	/* DMA address of the hdrqtail update location */
	dma_addr_t rcvhdrqtailaddr_phys;
	/* open count; when 0 the context is free (presumably — confirm) */
	int cnt;
	/* context number of this context */
	unsigned ctxt;
	/* NUMA node this context's memory is allocated on */
	int node_id;
	/* number of subcontexts sharing this context (0 = none) */
	u16 subctxt_cnt;
	/* subcontext id of the opener */
	u16 subctxt_id;
	/* number of eager TID entries for this context */
	u16 rcvegrcnt;
	/* first eager TID index used by this context */
	u16 rcvegr_tid_base;
	/* number of PIO buffers assigned to this context */
	u32 piocnt;
	/* index of first PIO buffer assigned */
	u32 pio_base;
	/* chip offset of the context's PIO buffers */
	u32 piobufs;
	/* number of chunks the eager buffers are split into */
	u32 rcvegrbuf_chunks;
	/* eager buffers per chunk */
	u16 rcvegrbufs_perchunk;
	/* log2 of the above, for cheap index math */
	u16 rcvegrbufs_perchunk_shift;
	/* size of each eager buffer chunk */
	size_t rcvegrbuf_size;
	/* size of the receive header queue allocation */
	size_t rcvhdrq_size;
	/* context state flag bits (QIB_CTXT_* below) */
	unsigned long flag;
	/* next expected-TID to allocate from */
	u32 tidcursor;
	/* statistics: waits for receive data */
	u32 rcvwait_to;
	/* statistics: waits for PIO buffers */
	u32 piowait_to;
	/* statistics: receive polls that found nothing */
	u32 rcvnowait;
	/* statistics: PIO polls that found nothing */
	u32 pionowait;
	/* count of urgent packets seen */
	u32 urgent;
	/* urgent count at last poll, to detect new urgent packets */
	u32 urgent_poll;
	/* pid of the process that opened the context */
	pid_t pid;
	/* pids of subcontext openers */
	pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
	/* command name of the opening process */
	char comm[16];
	/* partition keys in use by this context */
	u16 pkeys[4];
	/* back-pointer to owning device */
	struct qib_devdata *dd;
	/* back-pointer to owning IB port */
	struct qib_pportdata *ppd;
	/* shared user-register page for subcontexts */
	void *subctxt_uregbase;
	/* shared eager buffer area for subcontexts */
	void *subctxt_rcvegrbuf;
	/* shared receive header queue area for subcontexts */
	void *subctxt_rcvhdr_base;
	/* user-space software version reported at open */
	u32 userversion;
	/* bitmap of active subcontext slaves */
	u32 active_slaves;
	/* poll type requested by user (urgent vs. normal) */
	u16 poll_type;
	/* receive packet sequence counter */
	u8 seq_cnt;
	/* sequence counter used while redirecting (hdrq overflow recovery) */
	u8 redirect_seq_cnt;
	/* current receive header queue head index */
	u32 head;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
	/* per-context opcode statistics, verbs traffic only */
	struct qib_opcode_stats_perctx *opstats;
#endif
};
240
241struct rvt_sge_state;
242
/*
 * One send-DMA transmit request; embedded in larger request structures
 * (e.g. qib_verbs_txreq).  callback is invoked with a
 * QIB_SDMA_TXREQ_S_* status when the request completes or is flushed.
 */
struct qib_sdma_txreq {
	int flags;		/* QIB_SDMA_TXREQ_F_* bits */
	int sg_count;		/* number of DMA-mapped fragments */
	dma_addr_t addr;	/* mapped address of the buffer, if used */
	void (*callback)(struct qib_sdma_txreq *, int);
	u16 start_idx;		/* first descriptor queue index used */
	u16 next_descq_idx;	/* descriptor queue index after this request */
	struct list_head list;	/* linkage on the port's active list */
};
252
/* Hardware send-DMA descriptor: two little-endian quadwords. */
struct qib_sdma_desc {
	__le64 qw[2];
};
256
/*
 * Verbs transmit request: wraps a qib_sdma_txreq with the QP, work
 * request, and header information needed to send a verbs packet.
 */
struct qib_verbs_txreq {
	struct qib_sdma_txreq txreq;	/* embedded SDMA request */
	struct rvt_qp *qp;		/* QP the packet is sent on */
	struct rvt_swqe *wqe;		/* originating send work request */
	u32 dwords;			/* payload length in dwords */
	u16 hdr_dwords;			/* header length in dwords */
	u16 hdr_inx;			/* index of the header buffer used */
	struct qib_pio_header *align_buf;	/* bounce buffer, if needed */
	struct rvt_mregion *mr;		/* memory region to release on done */
	struct rvt_sge_state *ss;	/* SGE state for the payload */
};
268
269#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
270#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
271#define QIB_SDMA_TXREQ_F_INTREQ 0x4
272#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
273#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
274
275#define QIB_SDMA_TXREQ_S_OK 0
276#define QIB_SDMA_TXREQ_S_SENDERROR 1
277#define QIB_SDMA_TXREQ_S_ABORTED 2
278#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
279
280
281
282
283
284
285#define QIB_IB_CFG_LIDLMC 0
286#define QIB_IB_CFG_LWID_ENB 2
287#define QIB_IB_CFG_LWID 3
288#define QIB_IB_CFG_SPD_ENB 4
289#define QIB_IB_CFG_SPD 5
290#define QIB_IB_CFG_RXPOL_ENB 6
291#define QIB_IB_CFG_LREV_ENB 7
292#define QIB_IB_CFG_LINKLATENCY 8
293#define QIB_IB_CFG_HRTBT 9
294#define QIB_IB_CFG_OP_VLS 10
295#define QIB_IB_CFG_VL_HIGH_CAP 11
296#define QIB_IB_CFG_VL_LOW_CAP 12
297#define QIB_IB_CFG_OVERRUN_THRESH 13
298#define QIB_IB_CFG_PHYERR_THRESH 14
299#define QIB_IB_CFG_LINKDEFAULT 15
300#define QIB_IB_CFG_PKEYS 16
301#define QIB_IB_CFG_MTU 17
302#define QIB_IB_CFG_LSTATE 18
303#define QIB_IB_CFG_VL_HIGH_LIMIT 19
304#define QIB_IB_CFG_PMA_TICKS 20
305#define QIB_IB_CFG_PORT 21
306
307
308
309
310
311
312#define IB_LINKCMD_DOWN (0 << 16)
313#define IB_LINKCMD_ARMED (1 << 16)
314#define IB_LINKCMD_ACTIVE (2 << 16)
315#define IB_LINKINITCMD_NOP 0
316#define IB_LINKINITCMD_POLL 1
317#define IB_LINKINITCMD_SLEEP 2
318#define IB_LINKINITCMD_DISABLE 3
319
320
321
322
323#define QIB_IB_LINKDOWN 0
324#define QIB_IB_LINKARM 1
325#define QIB_IB_LINKACTIVE 2
326#define QIB_IB_LINKDOWN_ONLY 3
327#define QIB_IB_LINKDOWN_SLEEP 4
328#define QIB_IB_LINKDOWN_DISABLE 5
329
330
331
332
333
334
335
336
337#define QIB_IB_SDR 1
338#define QIB_IB_DDR 2
339#define QIB_IB_QDR 4
340
341#define QIB_DEFAULT_MTU 4096
342
343
344#define QIB_MAX_IB_PORTS 2
345
346
347
348
349#define QIB_IB_TBL_VL_HIGH_ARB 1
350#define QIB_IB_TBL_VL_LOW_ARB 2
351
352
353
354
355
356
357#define QIB_RCVCTRL_TAILUPD_ENB 0x01
358#define QIB_RCVCTRL_TAILUPD_DIS 0x02
359#define QIB_RCVCTRL_CTXT_ENB 0x04
360#define QIB_RCVCTRL_CTXT_DIS 0x08
361#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
362#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
363#define QIB_RCVCTRL_PKEY_ENB 0x40
364#define QIB_RCVCTRL_PKEY_DIS 0x80
365#define QIB_RCVCTRL_BP_ENB 0x0100
366#define QIB_RCVCTRL_BP_DIS 0x0200
367#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
368#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
369
370
371
372
373
374
375
376
377#define QIB_SENDCTRL_DISARM (0x1000)
378#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
379
380#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
381#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
382#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
383#define QIB_SENDCTRL_SEND_DIS (0x20000)
384#define QIB_SENDCTRL_SEND_ENB (0x40000)
385#define QIB_SENDCTRL_FLUSH (0x80000)
386#define QIB_SENDCTRL_CLEAR (0x100000)
387#define QIB_SENDCTRL_DISARM_ALL (0x200000)
388
389
390
391
392
393
394
395
396#define QIBPORTCNTR_PKTSEND 0U
397#define QIBPORTCNTR_WORDSEND 1U
398#define QIBPORTCNTR_PSXMITDATA 2U
399#define QIBPORTCNTR_PSXMITPKTS 3U
400#define QIBPORTCNTR_PSXMITWAIT 4U
401#define QIBPORTCNTR_SENDSTALL 5U
402
403#define QIBPORTCNTR_PKTRCV 6U
404#define QIBPORTCNTR_PSRCVDATA 7U
405#define QIBPORTCNTR_PSRCVPKTS 8U
406#define QIBPORTCNTR_RCVEBP 9U
407#define QIBPORTCNTR_RCVOVFL 10U
408#define QIBPORTCNTR_WORDRCV 11U
409
410#define QIBPORTCNTR_RXLOCALPHYERR 12U
411#define QIBPORTCNTR_RXVLERR 13U
412#define QIBPORTCNTR_ERRICRC 14U
413#define QIBPORTCNTR_ERRVCRC 15U
414#define QIBPORTCNTR_ERRLPCRC 16U
415#define QIBPORTCNTR_BADFORMAT 17U
416#define QIBPORTCNTR_ERR_RLEN 18U
417#define QIBPORTCNTR_IBSYMBOLERR 19U
418#define QIBPORTCNTR_INVALIDRLEN 20U
419#define QIBPORTCNTR_UNSUPVL 21U
420#define QIBPORTCNTR_EXCESSBUFOVFL 22U
421#define QIBPORTCNTR_ERRLINK 23U
422#define QIBPORTCNTR_IBLINKDOWN 24U
423#define QIBPORTCNTR_IBLINKERRRECOV 25U
424#define QIBPORTCNTR_LLI 26U
425
426#define QIBPORTCNTR_RXDROPPKT 27U
427#define QIBPORTCNTR_VL15PKTDROP 28U
428#define QIBPORTCNTR_ERRPKEY 29U
429#define QIBPORTCNTR_KHDROVFL 30U
430
431#define QIBPORTCNTR_PSINTERVAL 31U
432#define QIBPORTCNTR_PSSTART 32U
433#define QIBPORTCNTR_PSSTAT 33U
434
435
436#define ACTIVITY_TIMER 5
437
438#define MAX_NAME_SIZE 64
439
440#ifdef CONFIG_INFINIBAND_QIB_DCA
441struct qib_irq_notify;
442#endif
443
/*
 * Book-keeping for one MSI-X vector: the kernel msix_entry, the handler
 * argument, an interrupt name, and the CPU affinity mask; plus DCA
 * notifier state when DCA support is built in.
 */
struct qib_msix_entry {
	struct msix_entry msix;		/* vector/entry as given to PCI core */
	void *arg;			/* argument passed to the IRQ handler */
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int dca;			/* non-zero if DCA is used on this vector */
	int rcv;			/* non-zero for receive interrupts */
	struct qib_irq_notify *notifier;	/* affinity-change notifier */
#endif
	char name[MAX_NAME_SIZE];	/* name shown in /proc/interrupts */
	cpumask_var_t mask;		/* CPU affinity for this vector */
};
455
456
457
458
459
460
461struct qib_chip_specific;
462struct qib_chipport_specific;
463
/*
 * Send-DMA engine state machine states; sNN numbering gives a rough
 * ordering from fully down (s00) to running (s99).
 */
enum qib_sdma_states {
	qib_sdma_state_s00_hw_down,
	qib_sdma_state_s10_hw_start_up_wait,
	qib_sdma_state_s20_idle,
	qib_sdma_state_s30_sw_clean_up_wait,
	qib_sdma_state_s40_hw_clean_up_wait,
	qib_sdma_state_s50_hw_halt_wait,
	qib_sdma_state_s99_running,
};
473
/*
 * Events driving the send-DMA state machine.  The e7220/e7322 events
 * are chip-specific error-halt notifications.
 */
enum qib_sdma_events {
	qib_sdma_event_e00_go_hw_down,
	qib_sdma_event_e10_go_hw_start,
	qib_sdma_event_e20_hw_started,
	qib_sdma_event_e30_go_running,
	qib_sdma_event_e40_sw_cleaned,
	qib_sdma_event_e50_hw_cleaned,
	qib_sdma_event_e60_hw_halted,
	qib_sdma_event_e70_go_idle,
	qib_sdma_event_e7220_err_halted,
	qib_sdma_event_e7322_err_halted,
	qib_sdma_event_e90_timer_tick,
};
487
488extern char *qib_sdma_state_names[];
489extern char *qib_sdma_event_names[];
490
/*
 * Operations to apply when entering an SDMA state: which hardware
 * control ops to perform and how to update the go_s99_running flag.
 */
struct sdma_set_state_action {
	unsigned op_enable:1;		/* enable the SDMA engine */
	unsigned op_intenable:1;	/* enable SDMA interrupts */
	unsigned op_halt:1;		/* halt the SDMA engine */
	unsigned op_drain:1;		/* drain in-flight descriptors */
	unsigned go_s99_running_tofalse:1;	/* clear go_s99_running */
	unsigned go_s99_running_totrue:1;	/* set go_s99_running */
};
499
/*
 * Current SDMA state-machine state for a port, plus the table of
 * per-state actions and debug history of the previous transition.
 */
struct qib_sdma_state {
	struct kref kref;		/* users of the SDMA engine */
	struct completion comp;		/* signaled when last ref is dropped */
	enum qib_sdma_states current_state;
	struct sdma_set_state_action *set_state_action;	/* per-state op table */
	unsigned current_op;		/* ops applied for current state */
	unsigned go_s99_running;	/* want to (re)enter s99_running */
	unsigned first_sendbuf;		/* first PIO buffer used by SDMA */
	unsigned last_sendbuf;		/* one past last PIO buffer (exclusive — confirm) */
	/* debugging: record of the previous transition */
	enum qib_sdma_states previous_state;
	unsigned previous_op;
	enum qib_sdma_events last_event;
};
514
/*
 * Congestion-statistics sampling state: a timer plus cached hardware
 * counter values so deltas can be computed per sampling interval.
 */
struct xmit_wait {
	struct timer_list timer;	/* periodic sampling timer */
	u64 counter;			/* accumulated xmit-wait count */
	u8 flags;			/* sampling state flags */
	struct cache {			/* last raw counter readings */
		u64 psxmitdata;
		u64 psrcvdata;
		u64 psxmitpkts;
		u64 psrcvpkts;
		u64 psxmitwait;
	} counter_cache;
};
527
528
529
530
531
532
533
/*
 * Per-IB-port state.  One of these exists for each physical IB port on
 * the device (up to QIB_MAX_IB_PORTS).  Contains the verbs port data,
 * SDMA engine state, link state/configuration, and congestion-control
 * shadow tables.
 */
struct qib_pportdata {
	struct qib_ibport ibport_data;	/* verbs per-port data; must be first
					 * for ppd_from_ibp() container_of */

	struct qib_devdata *dd;		/* owning device */
	struct qib_chippport_specific *cpspec;	/* chip-specific per-port data */
	struct kobject pport_kobj;	/* sysfs objects for this port */
	struct kobject pport_cc_kobj;
	struct kobject sl2vl_kobj;
	struct kobject diagc_kobj;

	/* GUID for this port, in network order */
	__be64 guid;

	/* QIBL_* link state flags; protected by lflags_lock */
	u32 lflags;
	/* link states waited for in qib_wait_linkstate() */
	u32 state_wanted;
	spinlock_t lflags_lock;

	/* reference counts on each partition key */
	atomic_t pkeyrefs[4];

	/* points into the port's piece of dd->devstatusp (shared with
	 * user status pages) — confirm against qib_init */
	u64 *statusp;

	/* send-DMA descriptor queue and related state */
	struct qib_sdma_desc *sdma_descq;
	struct workqueue_struct *qib_wq;
	struct qib_sdma_state sdma_state;
	dma_addr_t sdma_descq_phys;
	volatile __le64 *sdma_head_dma;	/* chip DMAs its head index here */
	dma_addr_t sdma_head_phys;
	u16 sdma_descq_cnt;		/* descriptors in the queue */

	/* protects the SDMA fields below; own cacheline to avoid sharing */
	spinlock_t sdma_lock ____cacheline_aligned_in_smp;
	struct list_head sdma_activelist;	/* in-flight requests */
	struct list_head sdma_userpending;	/* queued user SDMA requests */
	u64 sdma_descq_added;		/* total descriptors ever queued */
	u64 sdma_descq_removed;		/* total descriptors ever completed */
	u16 sdma_descq_tail;		/* software tail index */
	u16 sdma_descq_head;		/* software head index */
	u8 sdma_generation;		/* generation bit for the hardware */
	u8 sdma_intrequest;		/* interrupt requested on next desc */

	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;

	wait_queue_head_t state_wait;	/* for qib_wait_linkstate() */

	/* head-of-line blocking workaround state */
	unsigned hol_state;
	struct timer_list hol_timer;

	/* last IBC status read, to detect changes */
	u64 lastibcstat;

	/* shadow copies of this port's receive and send control registers */
	unsigned long p_rcvctrl;
	unsigned long p_sendctrl;

	/* current IB MTU for this port, in bytes */
	u32 ibmtu;

	/* current max packet size on the wire, including headers */
	u32 ibmaxlen;

	/* ibmaxlen at initialization (largest ever allowed) */
	u32 init_ibmaxlen;

	u16 lid;			/* local identifier */

	u16 pkeys[4];			/* partition key table */

	u8 lmc;				/* LID mask control */
	u8 link_width_supported;
	u8 link_speed_supported;
	u8 link_width_enabled;
	u8 link_speed_enabled;
	u8 link_width_active;
	u8 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;

	u8 rx_pol_inv;			/* receive polarity inversion setting */

	u8 hw_pidx;			/* hardware port index (0-based) */
	u8 port;			/* IB port number (1-based) */

	u8 delay_mult;			/* multiplier for timing, per speed */

	/* LED override control (diagnostics/identification blinking) */
	u8 led_override;
	u16 led_override_timeoff;	/* jiffies to next LED phase */
	u8 led_override_vals[2];	/* LED values to alternate between */
	u8 led_override_phase;		/* which of the two values is active */
	atomic_t led_override_timer_active;
	struct timer_list led_override_timer;

	struct xmit_wait cong_stats;	/* congestion statistics sampling */
	struct timer_list symerr_clear_timer;	/* clears symbol errors on linkup */

	/* protects the congestion-control shadow structures below */
	spinlock_t cc_shadow_lock
		____cacheline_aligned_in_smp;

	/* shadow of the CC table as last written by the CCA manager */
	struct cc_table_shadow *ccti_entries_shadow;

	/* shadow of the CC congestion setting as last written */
	struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;

	/* active congestion control table entries */
	struct ib_cc_table_entry_shadow *ccti_entries;

	/* active per-SL congestion settings */
	struct ib_cc_congestion_entry_shadow *congestion_entries;

	/* number of CC table entries the hardware supports */
	u16 cc_supported_table_entries;

	/* total CC table entries currently configured */
	u16 total_cct_entry;

	/* bitmap of SLs with congestion control enabled */
	u16 cc_sl_control_map;

	/* maximum valid CCT index */
	u16 ccti_limit;

	/* CC table entries advertised to the CCA manager */
	u8 cc_max_table_entries;
};
696
697
698
699
700
701
702
703
704struct diag_observer;
705
/*
 * Diagnostic observer hook: called when a diag client accesses chip
 * registers in [bottom, top]; return value and side effects are
 * defined by qib_diag.c (hook may veto or shadow the access — confirm).
 */
typedef int (*diag_hook) (struct qib_devdata *dd,
	const struct diag_observer *op,
	u32 offs, u64 *data, u64 mask, int only_32);

/* An observer and the (inclusive) register offset range it watches. */
struct diag_observer {
	diag_hook hook;		/* callback for accesses in range */
	u32 bottom;		/* lowest watched register offset */
	u32 top;		/* highest watched register offset */
};
715
716extern int qib_register_observer(struct qib_devdata *dd,
717 const struct diag_observer *op);
718
719
720struct diag_observer_list_elt;
721
722
723
724
725
726
/*
 * Device data: one per chip.  Holds the verbs device, PCI and chardev
 * handles, register mappings, PIO buffer management, all chip-access
 * function pointers (f_*), and device-wide configuration read from the
 * chip at init time.
 */
struct qib_devdata {
	struct qib_ibdev verbs_dev;	/* verbs device; must be first for
					 * dd_from_dev() container_of */
	struct list_head list;		/* linkage on qib_dev_list */

	/* PCI and character-device handles */
	struct pci_dev *pcidev;
	struct cdev *user_cdev;
	struct cdev *diag_cdev;
	struct device *user_device;
	struct device *diag_device;

	/* mapped kernel register base */
	u64 __iomem *kregbase;
	/* end of mapped kernel register space */
	u64 __iomem *kregend;
	/* physical address of the chip's BAR for that mapping */
	resource_size_t physaddr;
	/* receive context data, indexed by context number */
	struct qib_ctxtdata **rcd;

	/* array of per-port data (num_pports entries) */
	struct qib_pportdata *pport;
	struct qib_chip_specific *cspec;	/* chip-specific data */

	/* mapped PIO buffer regions */
	void __iomem *pio2kbase;	/* 2K PIO buffers */
	void __iomem *pio4kbase;	/* 4K PIO buffers */
	void __iomem *piobase;		/* start of all PIO buffers */
	u64 __iomem *userbase;		/* user register space, if mapped */
	void __iomem *piovl15base;	/* VL15-only PIO buffers, if separate */

	/*
	 * PIO-availability shadow registers, DMA'd here by the chip;
	 * volatile because the hardware updates the memory behind our back.
	 */
	volatile __le64 *pioavailregs_dma;
	dma_addr_t pioavailregs_phys;

	/*
	 * Chip-access function pointers; set by the per-chip init code
	 * (qib_init_iba{6120,7220,7322}_funcs) so common code never
	 * touches chip registers directly.
	 */
	int (*f_intr_fallback)(struct qib_devdata *);
	int (*f_reset)(struct qib_devdata *);
	void (*f_quiet_serdes)(struct qib_pportdata *);
	int (*f_bringup_serdes)(struct qib_pportdata *);
	int (*f_early_init)(struct qib_devdata *);
	void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
	void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
				u32, unsigned long);
	void (*f_cleanup)(struct qib_devdata *);
	void (*f_setextled)(struct qib_pportdata *, u32);
	/* fill in chip-specific fields of the user base info */
	int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
	/* free irq */
	void (*f_free_irq)(struct qib_devdata *);
	struct qib_message_header *(*f_get_msgheader)
					(struct qib_devdata *, __le32 *);
	void (*f_config_ctxts)(struct qib_devdata *);
	int (*f_get_ib_cfg)(struct qib_pportdata *, int);
	int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
	int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
	int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
	int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
	u32 (*f_iblink_state)(u64);
	u8 (*f_ibphys_portstate)(u64);
	void (*f_xgxs_reset)(struct qib_pportdata *);
	/* per chip actions needed for IB Link up/down changes */
	int (*f_ib_updown)(struct qib_pportdata *, int, u64);
	u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
	/* Read/modify/write of GPIO pins (potentially chip-specific */
	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
		u32 mask);
	/* enable writes to config EEPROM (if supported) */
	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
	/*
	 * modify rcvctrl shadow[s] and write to appropriate chip-regs.
	 * Use ctxt < 0 for device-wide changes.
	 */
	void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
		int ctxt);
	/* Read/modify/write sendctrl appropriately for op and port. */
	void (*f_sendctrl)(struct qib_pportdata *, u32 op);
	void (*f_set_intr_state)(struct qib_devdata *, u32);
	void (*f_set_armlaunch)(struct qib_devdata *, u32);
	void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
	int (*f_late_initreg)(struct qib_devdata *);
	int (*f_init_sdma_regs)(struct qib_pportdata *);
	u16 (*f_sdma_gethead)(struct qib_pportdata *);
	int (*f_sdma_busy)(struct qib_pportdata *);
	void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
	void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
	void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
	void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
	void (*f_sdma_init_early)(struct qib_pportdata *);
	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
	u64 (*f_portcntr)(struct qib_pportdata *, u32);
	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
		u64 **);
	u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
		char **, u64 **);
	u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
	void (*f_initvl15_bufs)(struct qib_devdata *);
	void (*f_init_ctxt)(struct qib_ctxtdata *);
	void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
		struct qib_ctxtdata *);
	void (*f_writescratch)(struct qib_devdata *, u32);
	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
#endif

	char *boardname;	/* human-readable board name */

	/* template value written into expected-TID entries */
	u64 tidtemplate;
	/* value written to invalidate a TID entry */
	u64 tidinvalid;

	/* number of registers used for pioavail */
	u32 pioavregs;
	/* device (not port) flags, QIB_* below */
	u32 flags;
	/* last buffer for user use */
	u32 lastctxt_piobuf;

	/* saved for restart: interrupt count at last reset */
	u64 z_int_counter;
	/* per-cpu interrupt counters */
	u64 __percpu *int_counter;

	/* PIO buffers per user context */
	u32 pbufsctxt;
	/* contexts that get an extra PIO buffer (remainder) */
	u32 ctxts_extrabuf;
	/* number of contexts configured (module parameter or chip max) */
	u32 cfgctxts;
	/* number of user contexts currently available for use */
	u32 freectxts;

	/* hint to update the PIO availability shadow eagerly */
	u32 upd_pio_shadow;

	/* packet-rate heuristics for interrupt mitigation */
	u32 maxpkts_call;
	u32 avgpkts_call;
	u64 nopiobufs;		/* times no PIO buffer was available */

	/* PCI vendor and device IDs */
	u16 vendorid;
	u16 deviceid;
	/* write-combining mapping state (x86 WC/MTRR) */
	int wc_cookie;
	unsigned long wc_base;
	unsigned long wc_len;

	/* shadow of pinned user pages for expected TIDs */
	struct page **pageshadow;
	/* shadow of the DMA addresses for those pages */
	dma_addr_t *physshadow;
	u64 __iomem *egrtidbase;	/* chip eager TID array base */
	spinlock_t sendctrl_lock;	/* protects sendctrl shadow changes */
	/* protects rcd array and user context open/close */
	spinlock_t uctxt_lock;

	/* device status visible to user status pages */
	u64 *devstatusp;
	char *freezemsg;	/* message to print on chip freeze */
	u32 freezelen;		/* size of the freezemsg buffer */
	/* timer for periodic statistics collection */
	struct timer_list stats_timer;

	/* timer to verify interrupts are still being delivered */
	struct timer_list intrchk_timer;
	unsigned long ureg_align;	/* user register alignment */

	/* protects the PIO availability shadows below */
	spinlock_t pioavail_lock;
	/* last PIO buffer allocated, for round-robin fairness */
	u32 last_pio;
	/* minimum PIO buffer index reserved for the kernel */
	u32 min_kernel_pio;

	/*
	 * Shadow of the chip's PIO-availability bits (2 bits/buffer),
	 * plus which buffers are kernel-owned, need disarming, or are
	 * being written right now.
	 */
	unsigned long pioavailshadow[6];
	/* bitmap of send buffers available for the kernel to use */
	unsigned long pioavailkernel[6];
	/* bitmap of send buffers which need to be disarmed */
	unsigned long pio_need_disarm[3];
	/* bitmap of send buffers which are being actively written */
	unsigned long pio_writing[3];

	/* chip revision register value */
	u64 revision;
	/* base GUID for the device, in network order */
	__be64 base_guid;

	/* chip offset of PIO buffers, both sizes packed in one u64 */
	u64 piobufbase;
	u32 pio2k_bufbase;	/* offset of the 2K buffers within the BAR */

	/* number of GUIDs in the chip's GUID table */
	u32 nguid;

	/* device-wide shadows of the control registers */
	unsigned long rcvctrl;
	unsigned long sendctrl;

	/* receive header queue geometry */
	u32 rcvhdrcnt;		/* entries per header queue */
	u32 rcvhdrsize;		/* header size in dwords (chip view) */
	u32 rcvhdrentsize;	/* header queue entry size in dwords */
	u32 ctxtcnt;		/* number of contexts the chip supports */
	u32 palign;		/* page alignment required by the chip */

	/* PIO buffer geometry */
	u32 piobcnt2k;		/* number of 2K PIO buffers */
	u32 piosize2k;		/* size of a 2K buffer, in bytes */
	u32 piosize2kmax_dwords;	/* largest 2K send, in dwords */
	u32 piobcnt4k;		/* number of 4K PIO buffers */
	u32 piosize4k;		/* size of a 4K buffer, in bytes */

	/* chip receive-array layout */
	u32 rcvegrbase;		/* chip offset of eager TID array */
	u32 rcvtidbase;		/* chip offset of expected TID array */
	u32 rcvtidcnt;		/* number of expected TID entries */
	u32 uregbase;		/* chip offset of user registers */
	u32 control;		/* shadow of the chip control register */

	/* alignment required for 4K PIO buffers */
	u32 align4k;
	u16 rcvegrbufsize;	/* eager buffer size, in bytes */
	u16 rcvegrbufsize_shift;	/* log2 of eager buffer size */

	/* PCI bus width and speed, for reporting */
	u32 lbus_width;
	u32 lbus_speed;
	int unit;		/* device number (0, 1, ...) */

	/* saved MSI state, restored across chip resets */
	u32 msi_lo;
	u32 msi_hi;
	u16 msi_data;
	/* saved PCI BARs, restored across chip resets */
	u32 pcibar0;
	u32 pcibar1;
	u64 rhdrhead_intr_off;	/* bit OR'd into head writes to request
				 * an interrupt (chip-dependent) */

	/* board identification strings read from the chip/EEPROM */
	u8 serial[16];
	u8 boardversion[96];
	u8 lbus_info[32];

	u8 majrev;		/* chip major revision */
	u8 minrev;		/* chip minor revision */

	u8 num_pports;		/* number of physical IB ports */

	u8 first_user_ctxt;	/* first context usable by userspace */
	u8 n_krcv_queues;	/* kernel receive queues per port */
	u8 qpn_mask;		/* QPN bits used for context dispersal */
	u8 skip_kctxt_mask;	/* kernel contexts to skip when dispersing */

	u16 rhf_offset;		/* offset of RHF within receive hdr entry */

	/* TWSI (i2c-like) bus configuration for the EEPROM */
	u8 gpio_sda_num;
	u8 gpio_scl_num;
	u8 twsi_eeprom_dev;
	u8 board_atten;

	/* protects EEPROM error-logging state */
	spinlock_t eep_st_lock;
	/* serializes TWSI/EEPROM access */
	struct mutex eep_lock;
	uint64_t traffic_wds;	/* accumulated traffic words, for
				 * active-traffic detection */

	/* masks selecting which errors get logged to EEPROM, per class */
	struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
	struct qib_diag_client *diag_client;	/* open diag clients */
	spinlock_t qib_diag_trans_lock;	/* protects diag observer list */
	struct diag_observer_list_elt *diag_observer_list;

	/* non-zero if the chip supports the PSXmitWait counter */
	u8 psxmitwait_supported;
	/* sampling rate for the congestion timer, when supported */
	u16 psxmitwait_check_rate;
	/* tasklet deferring error-interrupt handling */
	struct tasklet_struct error_tasklet;

	int assigned_node_id;	/* NUMA node used for allocations */
};
1100
1101
1102#define QIB_HOL_UP 0
1103#define QIB_HOL_INIT 1
1104
1105#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1106#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1107#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1108#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1109#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1110
1111
1112#define TXCHK_CHG_TYPE_DIS1 3
1113#define TXCHK_CHG_TYPE_ENAB1 2
1114#define TXCHK_CHG_TYPE_KERN 1
1115#define TXCHK_CHG_TYPE_USER 0
1116
1117#define QIB_CHASE_TIME msecs_to_jiffies(145)
1118#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1119
1120
/*
 * Private data attached to each open file descriptor on the user
 * device; accessed through the *_fp() macros below.
 */
struct qib_filedata {
	struct qib_ctxtdata *rcd;	/* context owned by this fd */
	unsigned subctxt;		/* subcontext number within rcd */
	unsigned tidcursor;		/* per-fd expected-TID cursor */
	struct qib_user_sdma_queue *pq;	/* per-fd user SDMA queue */
	int rec_cpu_num;		/* recommended CPU, -1 if none */
};
1128
1129extern struct list_head qib_dev_list;
1130extern spinlock_t qib_devs_lock;
1131extern struct qib_devdata *qib_lookup(int unit);
1132extern u32 qib_cpulist_count;
1133extern unsigned long *qib_cpulist;
1134extern u16 qpt_mask;
1135extern unsigned qib_cc_table_size;
1136
1137int qib_init(struct qib_devdata *, int);
1138int init_chip_wc_pat(struct qib_devdata *dd, u32);
1139int qib_enable_wc(struct qib_devdata *dd);
1140void qib_disable_wc(struct qib_devdata *dd);
1141int qib_count_units(int *npresentp, int *nupp);
1142int qib_count_active_units(void);
1143
1144int qib_cdev_init(int minor, const char *name,
1145 const struct file_operations *fops,
1146 struct cdev **cdevp, struct device **devp);
1147void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
1148int qib_dev_init(void);
1149void qib_dev_cleanup(void);
1150
1151int qib_diag_add(struct qib_devdata *);
1152void qib_diag_remove(struct qib_devdata *);
1153void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
1154void qib_sdma_update_tail(struct qib_pportdata *, u16);
1155
1156int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
1157void qib_bad_intrstatus(struct qib_devdata *);
1158void qib_handle_urcv(struct qib_devdata *, u64);
1159
1160
1161void qib_chip_cleanup(struct qib_devdata *);
1162
1163void qib_chip_done(void);
1164
1165
1166int qib_unordered_wc(void);
1167void qib_pio_copy(void __iomem *to, const void *from, size_t count);
1168
1169void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
1170int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
1171void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
1172void qib_cancel_sends(struct qib_pportdata *);
1173
1174int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
1175int qib_setup_eagerbufs(struct qib_ctxtdata *);
1176void qib_set_ctxtcnt(struct qib_devdata *);
1177int qib_create_ctxts(struct qib_devdata *dd);
1178struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
1179int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
1180void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
1181
1182u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
1183int qib_reset_device(int);
1184int qib_wait_linkstate(struct qib_pportdata *, u32, int);
1185int qib_set_linkstate(struct qib_pportdata *, u8);
1186int qib_set_mtu(struct qib_pportdata *, u16);
1187int qib_set_lid(struct qib_pportdata *, u32, u8);
1188void qib_hol_down(struct qib_pportdata *);
1189void qib_hol_init(struct qib_pportdata *);
1190void qib_hol_up(struct qib_pportdata *);
1191void qib_hol_event(unsigned long);
1192void qib_disable_after_error(struct qib_devdata *);
1193int qib_set_uevent_bits(struct qib_pportdata *, const int);
1194
1195
1196#define ctxt_fp(fp) \
1197 (((struct qib_filedata *)(fp)->private_data)->rcd)
1198#define subctxt_fp(fp) \
1199 (((struct qib_filedata *)(fp)->private_data)->subctxt)
1200#define tidcursor_fp(fp) \
1201 (((struct qib_filedata *)(fp)->private_data)->tidcursor)
1202#define user_sdma_queue_fp(fp) \
1203 (((struct qib_filedata *)(fp)->private_data)->pq)
1204
/* Return the device that owns the given port. */
static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
{
	return ppd->dd;
}
1209
/*
 * Map a verbs device back to its qib_devdata; relies on verbs_dev
 * being embedded in struct qib_devdata.
 */
static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
{
	return container_of(dev, struct qib_devdata, verbs_dev);
}
1214
/* Map an ib_device back to its qib_devdata (via the embedded qib_ibdev). */
static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}
1219
/*
 * Map a verbs port back to its qib_pportdata; relies on ibport_data
 * being embedded in struct qib_pportdata.
 */
static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
{
	return container_of(ibp, struct qib_pportdata, ibport_data);
}
1224
/*
 * Return the qib_ibport for a 1-based IB port number on the given
 * device.  Warns (but still indexes) if the port number is out of
 * range, matching historical behavior.
 */
static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}
1233
1234
1235
1236
1237#define QIB_HAS_LINK_LATENCY 0x1
1238#define QIB_INITTED 0x2
1239#define QIB_DOING_RESET 0x4
1240#define QIB_PRESENT 0x8
1241#define QIB_PIO_FLUSH_WC 0x10
1242#define QIB_HAS_THRESH_UPDATE 0x40
1243#define QIB_HAS_SDMA_TIMEOUT 0x80
1244#define QIB_USE_SPCL_TRIG 0x100
1245#define QIB_NODMA_RTAIL 0x200
1246#define QIB_HAS_INTX 0x800
1247#define QIB_HAS_SEND_DMA 0x1000
1248#define QIB_HAS_VLSUPP 0x2000
1249#define QIB_HAS_HDRSUPP 0x4000
1250#define QIB_BADINTR 0x8000
1251#define QIB_DCA_ENABLED 0x10000
1252#define QIB_HAS_QSFP 0x20000
1253
1254
1255
1256
1257#define QIBL_LINKV 0x1
1258#define QIBL_LINKDOWN 0x8
1259#define QIBL_LINKINIT 0x10
1260#define QIBL_LINKARMED 0x20
1261#define QIBL_LINKACTIVE 0x40
1262
1263#define QIBL_IB_AUTONEG_INPROG 0x1000
1264#define QIBL_IB_AUTONEG_FAILED 0x2000
1265#define QIBL_IB_LINK_DISABLED 0x4000
1266
1267#define QIBL_IB_FORCE_NOTIFY 0x8000
1268
1269
1270#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1271
1272
1273
1274
1275#define QIB_CTXT_WAITING_RCV 2
1276
1277#define QIB_CTXT_MASTER_UNINIT 4
1278
1279#define QIB_CTXT_WAITING_URG 5
1280
1281
1282void qib_free_data(struct qib_ctxtdata *dd);
1283void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
1284 u32, struct qib_ctxtdata *);
1285struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
1286 const struct pci_device_id *);
1287struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
1288 const struct pci_device_id *);
1289struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
1290 const struct pci_device_id *);
1291void qib_free_devdata(struct qib_devdata *);
1292struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
1293
1294#define QIB_TWSI_NO_DEV 0xFF
1295
1296int qib_twsi_reset(struct qib_devdata *dd);
1297int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1298 int len);
1299int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1300 const void *buffer, int len);
1301void qib_get_eeprom_info(struct qib_devdata *);
1302#define qib_inc_eeprom_err(dd, eidx, incr)
1303void qib_dump_lookup_output_queue(struct qib_devdata *);
1304void qib_force_pio_avail_update(struct qib_devdata *);
1305void qib_clear_symerror_on_linkup(unsigned long opaque);
1306
1307
1308
1309
1310
1311
1312#define QIB_LED_PHYS 1
1313#define QIB_LED_LOG 2
1314void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1315
1316
1317int qib_setup_sdma(struct qib_pportdata *);
1318void qib_teardown_sdma(struct qib_pportdata *);
1319void __qib_sdma_intr(struct qib_pportdata *);
1320void qib_sdma_intr(struct qib_pportdata *);
1321void qib_user_sdma_send_desc(struct qib_pportdata *dd,
1322 struct list_head *pktlist);
1323int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
1324 u32, struct qib_verbs_txreq *);
1325
1326int qib_sdma_make_progress(struct qib_pportdata *dd);
1327
1328static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
1329{
1330 return ppd->sdma_descq_added == ppd->sdma_descq_removed;
1331}
1332
1333
1334static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
1335{
1336 return ppd->sdma_descq_cnt -
1337 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
1338}
1339
/*
 * Lockless check that the SDMA state machine is in s99_running;
 * callers needing a stable answer use qib_sdma_running() instead.
 */
static inline int __qib_sdma_running(struct qib_pportdata *ppd)
{
	return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
}
1344int qib_sdma_running(struct qib_pportdata *);
1345void dump_sdma_state(struct qib_pportdata *ppd);
1346void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1347void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1348
1349
1350
1351
1352#define QIB_DFLT_RCVHDRSIZE 9
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365#define QIB_RCVHDR_ENTSIZE 32
1366
1367int qib_get_user_pages(unsigned long, size_t, struct page **);
1368void qib_release_user_pages(struct page **, size_t);
1369int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
1370int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
1371u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
1372void qib_sendbuf_done(struct qib_devdata *, unsigned);
1373
/*
 * Zero the DMA'd receive header queue tail for a context, e.g. when
 * (re)initializing the queue so head and tail agree.
 */
static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
}
1378
/*
 * Read the receive header queue tail that the chip DMAs to host
 * memory.  The volatile access forces a fresh read each call, since
 * the hardware updates this location behind the CPU's back.
 */
static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	/*
	 * volatile because it's a DMA target from the chip, routinely
	 * changed while we're looking at it.
	 */
	return (u32) le64_to_cpu(
		*((volatile __le64 *)rcd->rcvhdrtail_kvaddr));
}
1388
1389static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
1390{
1391 const struct qib_devdata *dd = rcd->dd;
1392 u32 hdrqtail;
1393
1394 if (dd->flags & QIB_NODMA_RTAIL) {
1395 __le32 *rhf_addr;
1396 u32 seq;
1397
1398 rhf_addr = (__le32 *) rcd->rcvhdrq +
1399 rcd->head + dd->rhf_offset;
1400 seq = qib_hdrget_seq(rhf_addr);
1401 hdrqtail = rcd->head;
1402 if (seq == rcd->seq_cnt)
1403 hdrqtail++;
1404 } else
1405 hdrqtail = qib_get_rcvhdrtail(rcd);
1406
1407 return hdrqtail;
1408}
1409
1410
1411
1412
1413
1414extern const char ib_qib_version[];
1415
1416int qib_device_create(struct qib_devdata *);
1417void qib_device_remove(struct qib_devdata *);
1418
1419int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
1420 struct kobject *kobj);
1421int qib_verbs_register_sysfs(struct qib_devdata *);
1422void qib_verbs_unregister_sysfs(struct qib_devdata *);
1423
1424extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
1425
1426int __init qib_init_qibfs(void);
1427int __exit qib_exit_qibfs(void);
1428
1429int qibfs_add(struct qib_devdata *);
1430int qibfs_remove(struct qib_devdata *);
1431
1432int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
1433int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
1434 const struct pci_device_id *);
1435void qib_pcie_ddcleanup(struct qib_devdata *);
1436int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
1437int qib_reinit_intr(struct qib_devdata *);
1438void qib_enable_intx(struct pci_dev *);
1439void qib_nomsi(struct qib_devdata *);
1440void qib_nomsix(struct qib_devdata *);
1441void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
1442void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
1443
1444u64 qib_int_counter(struct qib_devdata *);
1445
1446u64 qib_sps_ints(void);
1447
1448
1449
1450
1451dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
1452 size_t, int);
1453const char *qib_get_unit_name(int unit);
1454const char *qib_get_card_name(struct rvt_dev_info *rdi);
1455struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
1456
1457
1458
1459
1460
/*
 * Flush write-combining buffers so posted PIO stores reach the chip
 * in order.  On x86-64 an sfence suffices (and is cheaper than a full
 * wmb); other architectures fall back to the generic write barrier.
 */
static inline void qib_flush_wc(void)
{
#if defined(CONFIG_X86_64)
	asm volatile("sfence" : : : "memory");
#else
	wmb(); /* no reorder around wc flush */
#endif
}
1469
1470
1471extern unsigned qib_ibmtu;
1472extern ushort qib_cfgctxts;
1473extern ushort qib_num_cfg_vls;
1474extern ushort qib_mini_init;
1475extern unsigned qib_n_krcv_queues;
1476extern unsigned qib_sdma_fetch_arb;
1477extern unsigned qib_compat_ddr_negotiate;
1478extern int qib_special_trigger;
1479extern unsigned qib_numa_aware;
1480
1481extern struct mutex qib_mutex;
1482
1483
1484#define STATUS_TIMEOUT 60
1485
1486#define QIB_DRV_NAME "ib_qib"
1487#define QIB_USER_MINOR_BASE 0
1488#define QIB_TRACE_MINOR 127
1489#define QIB_DIAGPKT_MINOR 128
1490#define QIB_DIAG_MINOR_BASE 129
1491#define QIB_NMINORS 255
1492
1493#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1494#define PCI_VENDOR_ID_QLOGIC 0x1077
1495#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1496#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1497#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508#define qib_early_err(dev, fmt, ...) \
1509 dev_err(dev, fmt, ##__VA_ARGS__)
1510
1511#define qib_dev_err(dd, fmt, ...) \
1512 dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
1513 qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
1514
1515#define qib_dev_warn(dd, fmt, ...) \
1516 dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
1517 qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
1518
1519#define qib_dev_porterr(dd, port, fmt, ...) \
1520 dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
1521 qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
1522 ##__VA_ARGS__)
1523
1524#define qib_devinfo(pcidev, fmt, ...) \
1525 dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
1526
1527
1528
1529
/*
 * Table entry mapping a hardware error bitmask to a human-readable
 * message; used by qib_format_hwerrors() below.
 */
struct qib_hwerror_msgs {
	u64 mask;		/* error bits this entry describes */
	const char *msg;	/* message to emit when any bit is set */
	size_t sz;		/* message length (may be 0 — confirm usage) */
};
1535
1536#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1537
1538
1539void qib_format_hwerrors(u64 hwerrs,
1540 const struct qib_hwerror_msgs *hwerrmsgs,
1541 size_t nhwerrmsgs, char *msg, size_t lmsg);
1542
1543void qib_stop_send_queue(struct rvt_qp *qp);
1544void qib_quiesce_qp(struct rvt_qp *qp);
1545void qib_flush_qp_waiters(struct rvt_qp *qp);
1546int qib_mtu_to_path_mtu(u32 mtu);
1547u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
1548void qib_notify_error_qp(struct rvt_qp *qp);
1549int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1550 struct ib_qp_attr *attr);
1551
1552#endif
1553