1#ifndef _QIB_KERNEL_H
2#define _QIB_KERNEL_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/list.h>
47#include <linux/scatterlist.h>
48#include <linux/slab.h>
49#include <linux/io.h>
50#include <linux/fs.h>
51#include <linux/completion.h>
52#include <linux/kref.h>
53#include <linux/sched.h>
54#include <linux/kthread.h>
55
56#include "qib_common.h"
57#include "qib_verbs.h"
58
59
60#define QIB_CHIP_VERS_MAJ 2U
61
62
63#define QIB_CHIP_VERS_MIN 0U
64
65
66#define QIB_OUI 0x001175
67#define QIB_OUI_LSB 40
68
69
70
71
72
73
74
75
76
/*
 * Device-wide software statistics, exported via the qib_stats global
 * declared below.  The sps_ prefix is historical ("software provided
 * stats").
 */
struct qlogic_ib_stats {
	__u64 sps_ints;		/* number of interrupts handled */
	__u64 sps_errints;	/* number of error interrupts */
	__u64 sps_txerrs;	/* tx-related packet errors */
	__u64 sps_rcverrs;	/* non-crc rcv packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs;	/* no pio bufs avail from kernel */
	__u64 sps_ctxts;	/* number of contexts currently open */
	__u64 sps_lenerrs;	/* number of kernel packets where RHF != LRH len */
	__u64 sps_buffull;	/* receive buffer-full events */
	__u64 sps_hdrfull;	/* receive header-queue-full events */
};
89
90extern struct qlogic_ib_stats qib_stats;
91extern const struct pci_error_handlers qib_pci_err_handler;
92extern struct pci_driver qib_driver;
93
94#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
95
96
97
98
99
100
101#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
102
103
104
105
106
107
108
109#define QIB_EEP_LOG_CNT (4)
/*
 * Masks selecting which error and hardware-error bits count toward
 * each of the QIB_EEP_LOG_CNT EEPROM error-log counters (see
 * eep_st_masks[] in struct qib_devdata and qib_inc_eeprom_err()).
 */
struct qib_eep_log_mask {
	u64 errs_to_log;	/* software error bits counted for this class */
	u64 hwerrs_to_log;	/* hardware error bits counted for this class */
};
114
115
116
117
118
119#ifdef CONFIG_DEBUG_FS
120struct qib_opcode_stats_perctx;
121#endif
122
/*
 * Per receive-context state; one per hardware receive context, possibly
 * shared by several user subcontexts.  Created by qib_create_ctxtdata()
 * and released by qib_free_ctxtdata().
 */
struct qib_ctxtdata {
	/* kernel virtual addresses of the eager receive buffer chunks */
	void **rcvegrbuf;
	/* DMA (bus) addresses of the eager receive buffer chunks */
	dma_addr_t *rcvegrbuf_phys;
	/* receive header queue base, kernel virtual address */
	void *rcvhdrq;
	/* kernel virtual address where the chip DMAs the hdrq tail */
	void *rcvhdrtail_kvaddr;
	/*
	 * temporary page list used while programming expected-receive
	 * TIDs for this context
	 */
	void *tid_pg_list;
	/* bitmap of user events, shared (mmap'd) with user space */
	unsigned long *user_event_mask;
	/* processes waiting in poll()/read() on this context */
	wait_queue_head_t wait;
	/* DMA address of the eager buffer region */
	dma_addr_t rcvegr_phys;
	/* DMA address of the receive header queue */
	dma_addr_t rcvhdrq_phys;
	/* DMA address the chip writes the hdrq tail pointer to */
	dma_addr_t rcvhdrqtailaddr_phys;
	/* number of opens (including slave subcontexts) of this context */
	int cnt;
	/* context number; index into dd->rcd[] */
	unsigned ctxt;
	/* NUMA node this context's memory is allocated on */
	int node_id;
	/* non-zero if context is shared; number of subcontexts */
	u16 subctxt_cnt;
	/* when sharing: subcontext id of this open */
	u16 subctxt_id;
	/* number of eager TID entries for this context */
	u16 rcvegrcnt;
	/* first eager TID index for this context */
	u16 rcvegr_tid_base;
	/* number of PIO buffers assigned to this context */
	u32 piocnt;
	/* first PIO buffer assigned to this context */
	u32 pio_base;
	/* chip offset of this context's PIO buffers */
	u32 piobufs;
	/* number of chunks making up the eager buffer region */
	u32 rcvegrbuf_chunks;
	/* eager buffers per chunk */
	u16 rcvegrbufs_perchunk;
	/* ilog2 of the above, to replace divides with shifts */
	u16 rcvegrbufs_perchunk_shift;
	/* allocation size of each eager buffer chunk */
	size_t rcvegrbuf_size;
	/* size of rcvhdrq, for freeing and mmap */
	size_t rcvhdrq_size;
	/* QIB_CTXT_* wait/state bits (see defines near end of file) */
	unsigned long flag;
	/* next expected TID to check for a free slot */
	u32 tidcursor;
	/* WAIT_RCV that timed out, no interrupt */
	u32 rcvwait_to;
	/* WAIT_PIO that timed out, no interrupt */
	u32 piowait_to;
	/* WAIT_RCV already happened, no wait needed */
	u32 rcvnowait;
	/* WAIT_PIO already happened, no wait needed */
	u32 pionowait;
	/* total number of urgent packets seen */
	u32 urgent;
	/* urgent count last reported to poll(), for edge triggering */
	u32 urgent_poll;
	/* pid of the process that opened this context */
	pid_t pid;
	pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
	/* owning process name; same size as task_struct->comm[] */
	char comm[16];
	/* partition keys in use by this context */
	u16 pkeys[4];
	/* back pointer to owning device */
	struct qib_devdata *dd;
	/* back pointer to owning port */
	struct qib_pportdata *ppd;
	/* shared-context mappings kept by the master subcontext */
	void *subctxt_uregbase;
	void *subctxt_rcvegrbuf;
	void *subctxt_rcvhdr_base;
	/* user-space ABI version requested by the opener */
	u32 userversion;
	/* bitmask of slave subcontexts that are in use */
	u32 active_slaves;
	/* type of poll in progress (urgent vs. normal) */
	u16 poll_type;
	/* expected receive-header-queue sequence number */
	u8 seq_cnt;
	u8 redirect_seq_cnt;
	/* next rcvhdrq entry for the kernel to process */
	u32 head;
	/* QP of the most recent packet, cached to avoid repeated lookups */
	struct qib_qp *lookaside_qp;
	u32 lookaside_qpn;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
	/* verbs opcode stats, per context */
	struct qib_opcode_stats_perctx *opstats;
#endif
};
243
244struct qib_sge_state;
245
/*
 * Low-level send DMA request; one per in-flight SDMA transfer.
 * Usually embedded in a larger request (see qib_verbs_txreq).
 */
struct qib_sdma_txreq {
	int flags;		/* QIB_SDMA_TXREQ_F_* bits */
	int sg_count;		/* number of DMA mappings to unmap on completion */
	dma_addr_t addr;	/* buffer DMA address, when F_FREEBUF is used */
	/* completion callback; 2nd arg is a QIB_SDMA_TXREQ_S_* status */
	void (*callback)(struct qib_sdma_txreq *, int);
	u16 start_idx;		/* sdma private: first descriptor index */
	u16 next_descq_idx;	/* sdma private: next free descriptor index */
	struct list_head list;	/* sdma private: entry on sdma_activelist */
};
255
/* one hardware send DMA descriptor: two little-endian qwords */
struct qib_sdma_desc {
	__le64 qw[2];
};
259
/* a verbs send work request queued on the send DMA engine */
struct qib_verbs_txreq {
	struct qib_sdma_txreq txreq;	/* embedded low-level SDMA request */
	struct qib_qp *qp;		/* QP this request belongs to */
	struct qib_swqe *wqe;		/* work queue entry being sent */
	u32 dwords;			/* payload length in dwords */
	u16 hdr_dwords;			/* header length in dwords */
	u16 hdr_inx;			/* header index */
	struct qib_pio_header *align_buf;	/* bounce buffer for alignment */
	struct qib_mregion *mr;		/* memory region reference held */
	struct qib_sge_state *ss;	/* SGE iteration state for the payload */
};
271
272#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
273#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
274#define QIB_SDMA_TXREQ_F_INTREQ 0x4
275#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
276#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
277
278#define QIB_SDMA_TXREQ_S_OK 0
279#define QIB_SDMA_TXREQ_S_SENDERROR 1
280#define QIB_SDMA_TXREQ_S_ABORTED 2
281#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
282
283
284
285
286
287
288#define QIB_IB_CFG_LIDLMC 0
289#define QIB_IB_CFG_LWID_ENB 2
290#define QIB_IB_CFG_LWID 3
291#define QIB_IB_CFG_SPD_ENB 4
292#define QIB_IB_CFG_SPD 5
293#define QIB_IB_CFG_RXPOL_ENB 6
294#define QIB_IB_CFG_LREV_ENB 7
295#define QIB_IB_CFG_LINKLATENCY 8
296#define QIB_IB_CFG_HRTBT 9
297#define QIB_IB_CFG_OP_VLS 10
298#define QIB_IB_CFG_VL_HIGH_CAP 11
299#define QIB_IB_CFG_VL_LOW_CAP 12
300#define QIB_IB_CFG_OVERRUN_THRESH 13
301#define QIB_IB_CFG_PHYERR_THRESH 14
302#define QIB_IB_CFG_LINKDEFAULT 15
303#define QIB_IB_CFG_PKEYS 16
304#define QIB_IB_CFG_MTU 17
305#define QIB_IB_CFG_LSTATE 18
306#define QIB_IB_CFG_VL_HIGH_LIMIT 19
307#define QIB_IB_CFG_PMA_TICKS 20
308#define QIB_IB_CFG_PORT 21
309
310
311
312
313
314
315#define IB_LINKCMD_DOWN (0 << 16)
316#define IB_LINKCMD_ARMED (1 << 16)
317#define IB_LINKCMD_ACTIVE (2 << 16)
318#define IB_LINKINITCMD_NOP 0
319#define IB_LINKINITCMD_POLL 1
320#define IB_LINKINITCMD_SLEEP 2
321#define IB_LINKINITCMD_DISABLE 3
322
323
324
325
326#define QIB_IB_LINKDOWN 0
327#define QIB_IB_LINKARM 1
328#define QIB_IB_LINKACTIVE 2
329#define QIB_IB_LINKDOWN_ONLY 3
330#define QIB_IB_LINKDOWN_SLEEP 4
331#define QIB_IB_LINKDOWN_DISABLE 5
332
333
334
335
336
337
338
339
340#define QIB_IB_SDR 1
341#define QIB_IB_DDR 2
342#define QIB_IB_QDR 4
343
344#define QIB_DEFAULT_MTU 4096
345
346
347#define QIB_MAX_IB_PORTS 2
348
349
350
351
352#define QIB_IB_TBL_VL_HIGH_ARB 1
353#define QIB_IB_TBL_VL_LOW_ARB 2
354
355
356
357
358
359
360#define QIB_RCVCTRL_TAILUPD_ENB 0x01
361#define QIB_RCVCTRL_TAILUPD_DIS 0x02
362#define QIB_RCVCTRL_CTXT_ENB 0x04
363#define QIB_RCVCTRL_CTXT_DIS 0x08
364#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
365#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
366#define QIB_RCVCTRL_PKEY_ENB 0x40
367#define QIB_RCVCTRL_PKEY_DIS 0x80
368#define QIB_RCVCTRL_BP_ENB 0x0100
369#define QIB_RCVCTRL_BP_DIS 0x0200
370#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
371#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
372
373
374
375
376
377
378
379
380#define QIB_SENDCTRL_DISARM (0x1000)
381#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
382
383#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
384#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
385#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
386#define QIB_SENDCTRL_SEND_DIS (0x20000)
387#define QIB_SENDCTRL_SEND_ENB (0x40000)
388#define QIB_SENDCTRL_FLUSH (0x80000)
389#define QIB_SENDCTRL_CLEAR (0x100000)
390#define QIB_SENDCTRL_DISARM_ALL (0x200000)
391
392
393
394
395
396
397
398
399#define QIBPORTCNTR_PKTSEND 0U
400#define QIBPORTCNTR_WORDSEND 1U
401#define QIBPORTCNTR_PSXMITDATA 2U
402#define QIBPORTCNTR_PSXMITPKTS 3U
403#define QIBPORTCNTR_PSXMITWAIT 4U
404#define QIBPORTCNTR_SENDSTALL 5U
405
406#define QIBPORTCNTR_PKTRCV 6U
407#define QIBPORTCNTR_PSRCVDATA 7U
408#define QIBPORTCNTR_PSRCVPKTS 8U
409#define QIBPORTCNTR_RCVEBP 9U
410#define QIBPORTCNTR_RCVOVFL 10U
411#define QIBPORTCNTR_WORDRCV 11U
412
413#define QIBPORTCNTR_RXLOCALPHYERR 12U
414#define QIBPORTCNTR_RXVLERR 13U
415#define QIBPORTCNTR_ERRICRC 14U
416#define QIBPORTCNTR_ERRVCRC 15U
417#define QIBPORTCNTR_ERRLPCRC 16U
418#define QIBPORTCNTR_BADFORMAT 17U
419#define QIBPORTCNTR_ERR_RLEN 18U
420#define QIBPORTCNTR_IBSYMBOLERR 19U
421#define QIBPORTCNTR_INVALIDRLEN 20U
422#define QIBPORTCNTR_UNSUPVL 21U
423#define QIBPORTCNTR_EXCESSBUFOVFL 22U
424#define QIBPORTCNTR_ERRLINK 23U
425#define QIBPORTCNTR_IBLINKDOWN 24U
426#define QIBPORTCNTR_IBLINKERRRECOV 25U
427#define QIBPORTCNTR_LLI 26U
428
429#define QIBPORTCNTR_RXDROPPKT 27U
430#define QIBPORTCNTR_VL15PKTDROP 28U
431#define QIBPORTCNTR_ERRPKEY 29U
432#define QIBPORTCNTR_KHDROVFL 30U
433
434#define QIBPORTCNTR_PSINTERVAL 31U
435#define QIBPORTCNTR_PSSTART 32U
436#define QIBPORTCNTR_PSSTAT 33U
437
438
439#define ACTIVITY_TIMER 5
440
441#define MAX_NAME_SIZE 64
442
443#ifdef CONFIG_INFINIBAND_QIB_DCA
444struct qib_irq_notify;
445#endif
446
/* bookkeeping for one MSI-X interrupt vector */
struct qib_msix_entry {
	struct msix_entry msix;	/* kernel MSI-X entry (vector/entry number) */
	void *arg;		/* argument passed to the interrupt handler */
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int dca;		/* non-zero if this vector participates in DCA */
	int rcv;		/* non-zero for a receive-context vector */
	struct qib_irq_notify *notifier;	/* IRQ affinity notifier */
#endif
	char name[MAX_NAME_SIZE];	/* name shown for this IRQ */
	cpumask_var_t mask;	/* CPU affinity mask for this vector */
};
458
459
460
461
462
463
/*
 * Opaque chip-specific private data, defined by the per-chip sources
 * (qib_iba6120/7220/7322).  The per-port tag is spelled "chippport"
 * (chip + pport) to match its use in struct qib_pportdata below; the
 * previous forward declaration misspelled it "chipport" and therefore
 * declared an unused, unrelated tag.
 */
struct qib_chip_specific;
struct qib_chippport_specific;
466
/*
 * States of the per-port send DMA state machine; the sNN numbering
 * orders the states from hardware-down to fully running.  Transitions
 * are driven by qib_sdma_process_event().
 */
enum qib_sdma_states {
	qib_sdma_state_s00_hw_down,
	qib_sdma_state_s10_hw_start_up_wait,
	qib_sdma_state_s20_idle,
	qib_sdma_state_s30_sw_clean_up_wait,
	qib_sdma_state_s40_hw_clean_up_wait,
	qib_sdma_state_s50_hw_halt_wait,
	qib_sdma_state_s99_running,
};
476
/*
 * Events fed to the send DMA state machine via
 * qib_sdma_process_event().  The e7220/e7322 error events are
 * chip-generation specific halt notifications.
 */
enum qib_sdma_events {
	qib_sdma_event_e00_go_hw_down,
	qib_sdma_event_e10_go_hw_start,
	qib_sdma_event_e20_hw_started,
	qib_sdma_event_e30_go_running,
	qib_sdma_event_e40_sw_cleaned,
	qib_sdma_event_e50_hw_cleaned,
	qib_sdma_event_e60_hw_halted,
	qib_sdma_event_e70_go_idle,
	qib_sdma_event_e7220_err_halted,
	qib_sdma_event_e7322_err_halted,
	qib_sdma_event_e90_timer_tick,
};
490
491extern char *qib_sdma_state_names[];
492extern char *qib_sdma_event_names[];
493
/*
 * Per-state actions applied on entry to an SDMA state: which hardware
 * sendctrl operations to perform and how the go_s99_running flag in
 * struct qib_sdma_state changes.
 */
struct sdma_set_state_action {
	unsigned op_enable:1;		/* apply hw enable */
	unsigned op_intenable:1;	/* apply hw interrupt enable */
	unsigned op_halt:1;		/* apply hw halt */
	unsigned op_drain:1;		/* apply hw drain */
	unsigned go_s99_running_tofalse:1;	/* clear go_s99_running */
	unsigned go_s99_running_totrue:1;	/* set go_s99_running */
};
502
/* state of the per-port send DMA state machine */
struct qib_sdma_state {
	struct kref kref;		/* references; release completes comp */
	struct completion comp;		/* signaled when all refs are dropped */
	enum qib_sdma_states current_state;
	struct sdma_set_state_action *set_state_action;	/* per-state table */
	unsigned current_op;		/* QIB_SDMA_SENDCTRL_OP_* in effect */
	unsigned go_s99_running;	/* want to reach/stay in s99_running */
	unsigned first_sendbuf;
	unsigned last_sendbuf;
	/* the previous-state fields below aid debugging */
	enum qib_sdma_states previous_state;
	unsigned previous_op;
	enum qib_sdma_events last_event;
};
517
/*
 * Sampling state for the PS* (port sample) congestion counters; used
 * as cong_stats in struct qib_pportdata.
 */
struct xmit_wait {
	struct timer_list timer;	/* periodic sampling timer */
	u64 counter;
	u8 flags;
	struct cache {
		/* counter values cached from the previous sample */
		u64 psxmitdata;
		u64 psrcvdata;
		u64 psxmitpkts;
		u64 psrcvpkts;
		u64 psxmitwait;
	} counter_cache;
};
530
531
532
533
534
535
536
/*
 * Per-IB-port state; one per physical port, in the dd->pport[] array.
 * Initialized by qib_init_pportdata().
 */
struct qib_pportdata {
	struct qib_ibport ibport_data;	/* verbs per-port data */

	struct qib_devdata *dd;		/* owning device */
	struct qib_chippport_specific *cpspec;	/* chip-specific per-port data */
	/* sysfs kobjects for this port */
	struct kobject pport_kobj;
	struct kobject pport_cc_kobj;	/* congestion control */
	struct kobject sl2vl_kobj;	/* SL-to-VL mapping */
	struct kobject diagc_kobj;	/* diag counters */

	/* GUID for this port, in network byte order */
	__be64 guid;

	/* QIBL_* link state flags; protected by lflags_lock */
	u32 lflags;
	/* link states the driver is waiting for, see state_wait below */
	u32 state_wanted;
	spinlock_t lflags_lock;

	/* reference counts for the pkeys[] slots below */
	atomic_t pkeyrefs[4];

	/*
	 * One qword of port status; mapped read-only into user
	 * processes so they can poll link status cheaply (see also
	 * devstatusp in struct qib_devdata).
	 */
	u64 *statusp;

	/* -- send DMA state, protected by sdma_lock -- */

	struct qib_sdma_desc *sdma_descq;	/* descriptor queue */
	struct workqueue_struct *qib_wq;
	struct qib_sdma_state sdma_state;	/* state machine state */
	dma_addr_t sdma_descq_phys;
	volatile __le64 *sdma_head_dma;	/* head index DMA'd by the chip */
	dma_addr_t sdma_head_phys;
	u16 sdma_descq_cnt;

	spinlock_t sdma_lock ____cacheline_aligned_in_smp;
	struct list_head sdma_activelist;	/* in-flight txreqs */
	u64 sdma_descq_added;	/* cumulative count of descriptors queued */
	u64 sdma_descq_removed;	/* cumulative count of descriptors retired */
	u16 sdma_descq_tail;
	u16 sdma_descq_head;
	u8 sdma_generation;

	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;

	/* woken when a state in state_wanted is reached */
	wait_queue_head_t state_wait;

	/* head-of-line blocking handling (QIB_HOL_UP / QIB_HOL_INIT) */
	unsigned hol_state;
	struct timer_list hol_timer;

	/* last IBC link status read, used to detect state changes */
	u64 lastibcstat;

	/* per-port shadows of receive and send control state */
	unsigned long p_rcvctrl;
	unsigned long p_sendctrl;

	/* currently configured IB MTU, in bytes */
	u32 ibmtu;
	/* max IB packet size (bytes) this port will send or receive */
	u32 ibmaxlen;
	/* ibmaxlen at initialization time */
	u32 init_ibmaxlen;

	/* LID programmed for this port */
	u16 lid;
	/* partition key table shadow */
	u16 pkeys[4];

	u8 lmc;				/* LID mask control */
	u8 link_width_supported;
	u8 link_speed_supported;
	u8 link_width_enabled;
	u8 link_speed_enabled;
	u8 link_width_active;
	u8 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;

	/* receive polarity inversion setting */
	u8 rx_pol_inv;

	u8 hw_pidx;	/* physical port index (0-based) */
	u8 port;	/* IB port number (1-based), not index */

	u8 delay_mult;

	/* LED override support (see qib_set_led_override()) */
	u8 led_override;
	u16 led_override_timeoff;	/* delta to next timer event */
	u8 led_override_vals[2];	/* LED values to alternate between */
	u8 led_override_phase;		/* which of the two values is active */
	atomic_t led_override_timer_active;
	/* timer that blinks the LEDs while the override is active */
	struct timer_list led_override_timer;
	struct xmit_wait cong_stats;	/* PS* congestion counter sampling */
	struct timer_list symerr_clear_timer;

	/* protects the congestion-control shadow tables below */
	spinlock_t cc_shadow_lock
		____cacheline_aligned_in_smp;

	/* shadow of the congestion-control table */
	struct cc_table_shadow *ccti_entries_shadow;

	/* shadow of the congestion-control settings */
	struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;

	/* active congestion-control table entries */
	struct ib_cc_table_entry_shadow *ccti_entries;

	/* active per-SL congestion-control settings */
	struct ib_cc_congestion_entry_shadow *congestion_entries;

	/* number of congestion-control table entries supported */
	u16 cc_supported_table_entries;

	/* total number of congestion-control table entries */
	u16 total_cct_entry;

	/* bitmap of SLs with congestion control enabled */
	u16 cc_sl_control_map;

	/* maximum valid congestion-control table index */
	u16 ccti_limit;

	/* max number of 64-entry units in the congestion-control table */
	u8 cc_max_table_entries;
};
697
698
699
700
701
702
703
704
/*
 * Diag register-access observers: diag support code can register a
 * hook that is invoked when diag accesses touch a watched chip
 * register range.
 */
struct diag_observer;

/*
 * Observer callback, invoked for an access at offset "offs" with the
 * given data/mask; only_32 distinguishes 32-bit from 64-bit accesses.
 * NOTE(review): return-value semantics are defined by the diag core —
 * confirm against the diag implementation.
 */
typedef int (*diag_hook) (struct qib_devdata *dd,
	const struct diag_observer *op,
	u32 offs, u64 *data, u64 mask, int only_32);

struct diag_observer {
	diag_hook hook;	/* called for accesses within [bottom, top] */
	u32 bottom;	/* first register offset watched */
	u32 top;	/* last register offset watched */
};
716
717extern int qib_register_observer(struct qib_devdata *dd,
718 const struct diag_observer *op);
719
720
721struct diag_observer_list_elt;
722
723
724
725
726
727
/*
 * Per-device state; one per qlogic_ib HCA, allocated by
 * qib_alloc_devdata() and linked on qib_dev_list.  The f_* members are
 * chip-specific function pointers filled in by the appropriate
 * qib_init_iba*_funcs() routine for the detected chip generation.
 */
struct qib_devdata {
	struct qib_ibdev verbs_dev;	/* verbs device data */
	struct list_head list;		/* entry on qib_dev_list */

	/* PCI device and character-device bookkeeping */
	struct pci_dev *pcidev;
	struct cdev *user_cdev;
	struct cdev *diag_cdev;
	struct device *user_device;
	struct device *diag_device;

	/* mem-mapped pointer to base of chip registers */
	u64 __iomem *kregbase;
	/* end of mapped chip space, excluding sendbuf and user regs */
	u64 __iomem *kregend;
	/* physical address of chip, for io_remap etc. */
	resource_size_t physaddr;

	/* receive context data, indexed by context number */
	struct qib_ctxtdata **rcd;

	/* per-port data (num_pports entries) and chip-private data */
	struct qib_pportdata *pport;
	struct qib_chip_specific *cspec;

	/* kernel virtual address of first 2k PIO buffer */
	void __iomem *pio2kbase;
	/* kernel virtual address of first 4k PIO buffer */
	void __iomem *pio4kbase;
	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/* mem-mapped base of user chip registers, if mapped */
	u64 __iomem *userbase;
	void __iomem *piovl15base;	/* base of VL15 buffers */

	/*
	 * Area where the PIO-avail registers are DMA'd by the chip,
	 * hence volatile.  Kept on its own page because the page is
	 * mapped into user program space.
	 */
	volatile __le64 *pioavailregs_dma;
	dma_addr_t pioavailregs_phys;	/* its DMA (bus) address */

	/* -- chip-specific function pointers -- */

	/* fall back to an alternate interrupt type if possible */
	int (*f_intr_fallback)(struct qib_devdata *);
	/* hard reset chip */
	int (*f_reset)(struct qib_devdata *);
	void (*f_quiet_serdes)(struct qib_pportdata *);
	int (*f_bringup_serdes)(struct qib_pportdata *);
	int (*f_early_init)(struct qib_devdata *);
	void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
	/* write one expected/eager TID entry */
	void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
				u32, unsigned long);
	void (*f_cleanup)(struct qib_devdata *);
	void (*f_setextled)(struct qib_pportdata *, u32);
	/* fill in chip-specific fields of the user base info */
	int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
	/* free irq resources */
	void (*f_free_irq)(struct qib_devdata *);
	struct qib_message_header *(*f_get_msgheader)
					(struct qib_devdata *, __le32 *);
	void (*f_config_ctxts)(struct qib_devdata *);
	/* get/set QIB_IB_CFG_* values */
	int (*f_get_ib_cfg)(struct qib_pportdata *, int);
	int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
	int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
	/* get/set QIB_IB_TBL_* tables */
	int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
	int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
	/* decode link state / physical port state from ibcstatus */
	u32 (*f_iblink_state)(u64);
	u8 (*f_ibphys_portstate)(u64);
	void (*f_xgxs_reset)(struct qib_pportdata *);
	/* per-chip actions needed for IB link up/down changes */
	int (*f_ib_updown)(struct qib_pportdata *, int, u64);
	u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
	/* read/modify/write of GPIO pins (potentially chip-specific) */
	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
		u32 mask);
	/* enable writes to config EEPROM, if supported */
	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
	/*
	 * Modify rcvctrl shadow[s] and write to the appropriate chip
	 * registers; op is a combination of the QIB_RCVCTRL_*_ENB/_DIS
	 * values above.  ctxt == -1 means "all contexts".
	 */
	void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
		int ctxt);
	/* read/modify/write sendctrl appropriately for op and port */
	void (*f_sendctrl)(struct qib_pportdata *, u32 op);
	void (*f_set_intr_state)(struct qib_devdata *, u32);
	void (*f_set_armlaunch)(struct qib_devdata *, u32);
	void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
	int (*f_late_initreg)(struct qib_devdata *);
	/* send DMA hardware hooks */
	int (*f_init_sdma_regs)(struct qib_pportdata *);
	u16 (*f_sdma_gethead)(struct qib_pportdata *);
	int (*f_sdma_busy)(struct qib_pportdata *);
	void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
	void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
	void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
	void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
	void (*f_sdma_init_early)(struct qib_pportdata *);
	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
	/* read one QIBPORTCNTR_* counter */
	u64 (*f_portcntr)(struct qib_pportdata *, u32);
	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
		u64 **);
	u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
		char **, u64 **);
	u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
	void (*f_initvl15_bufs)(struct qib_devdata *);
	void (*f_init_ctxt)(struct qib_ctxtdata *);
	/* apply a TXCHK_CHG_TYPE_* transition for a buffer range */
	void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
		struct qib_ctxtdata *);
	void (*f_writescratch)(struct qib_devdata *, u32);
	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
#endif

	char *boardname;	/* human-readable board description */

	/* template for writing TIDs */
	u64 tidtemplate;
	/* value written to free TIDs */
	u64 tidinvalid;

	/* number of registers used for pioavail */
	u32 pioavregs;
	/* device (not port) QIB_* flags; basically chip capabilities */
	u32 flags;
	/* last buffer available for user use */
	u32 lastctxt_piobuf;

	/* count of device (non-port-specific) interrupts */
	u32 int_counter;

	/* PIO buffers allocated per context */
	u32 pbufsctxt;
	/* if bufs/ctxt has a remainder, contexts < ctxts_extrabuf get one extra */
	u32 ctxts_extrabuf;
	/*
	 * Number of contexts configured as max; zero means use the
	 * number the chip supports (fewer gives more PIO bufs/ctxt).
	 */
	u32 cfgctxts;
	/* number of contexts still available for user (PSM) open */
	u32 freectxts;

	/*
	 * Hint that pioavailshadow should be refreshed before
	 * searching for a PIO buffer.
	 */
	u32 upd_pio_shadow;

	/* internal debugging statistics */
	u32 maxpkts_call;
	u32 avgpkts_call;
	u64 nopiobufs;

	/* PCI vendor ID (kept here for NodeInfo) */
	u16 vendorid;
	/* PCI device ID (kept here for NodeInfo) */
	u16 deviceid;

	/* write-combining mapping bookkeeping */
	unsigned long wc_cookie;
	unsigned long wc_base;
	unsigned long wc_len;

	/* shadow copy of struct page *'s for expected-TID pages */
	struct page **pageshadow;
	/* shadow copy of DMA handles for expected-TID pages */
	dma_addr_t *physshadow;
	u64 __iomem *egrtidbase;	/* chip's eager TID table base */
	spinlock_t sendctrl_lock;	/* protects the sendctrl shadow */
	/* protects rcd[] and user context counts (interrupt vs. free) */
	spinlock_t uctxt_lock;

	/*
	 * Per-unit status (see also pportdata->statusp); mapped
	 * read-only into user processes so they can get unit and link
	 * status cheaply.
	 */
	u64 *devstatusp;
	char *freezemsg;	/* freeze message if a hwerror froze the chip */
	u32 freezelen;		/* max length of freezemsg */
	/* timer to prevent stats overflow, throttle errors, etc. */
	struct timer_list stats_timer;

	/* timer to verify interrupts work, and fall back if possible */
	struct timer_list intrchk_timer;
	unsigned long ureg_align;	/* user register alignment */

	/*
	 * Protects pioavailshadow, pioavailkernel, pio_need_disarm and
	 * pio_writing.
	 */
	spinlock_t pioavail_lock;
	/* index of last buffer used, to optimize the next search */
	u32 last_pio;
	/* min kernel PIO buffer, to optimize the search */
	u32 min_kernel_pio;

	/*
	 * Shadow copies of chip registers.  Most are read-only, but
	 * some are write-only; for those we manipulate the shadow and
	 * then write it to the chip.
	 */

	/* shadow of the chip's PIO-buffer-available bits */
	unsigned long pioavailshadow[6];
	/* bitmap of send buffers available for kernel PIO use */
	unsigned long pioavailkernel[6];
	/* bitmap of send buffers that need to be disarmed */
	unsigned long pio_need_disarm[3];
	/* bitmap of send buffers currently being written to */
	unsigned long pio_writing[3];
	/* chip revision register shadow */
	u64 revision;
	/* base GUID for the device (from flash, network order) */
	__be64 base_guid;

	/*
	 * Chip offset of the PIO buffers, and base of the 2KB buffers
	 * (user processes only use 2K buffers).
	 */
	u64 piobufbase;
	u32 pio2k_bufbase;

	/* number of GUIDs in the flash for this interface */
	u32 nguid;
	/*
	 * The following two are conceptually 32-bit masks, but kept as
	 * unsigned long because {test,clear,set}_bit expect that.
	 */
	unsigned long rcvctrl;	/* device rcvctrl shadow */
	unsigned long sendctrl;	/* device sendctrl shadow */

	/* receive header queue entry count written to the chip */
	u32 rcvhdrcnt;
	/* receive header size written to the chip */
	u32 rcvhdrsize;
	/* receive header entry size written to the chip */
	u32 rcvhdrentsize;
	/* chip context count */
	u32 ctxtcnt;
	/* chip page-align value */
	u32 palign;
	/* number of "2KB" PIO buffers */
	u32 piobcnt2k;
	/* size in bytes of "2KB" PIO buffers */
	u32 piosize2k;
	/* max usable size in dwords of a "2KB" PIO buffer */
	u32 piosize2kmax_dwords;
	/* number of "4KB" PIO buffers */
	u32 piobcnt4k;
	/* size in bytes of "4KB" PIO buffers */
	u32 piosize4k;
	/* chip eager-buffer base */
	u32 rcvegrbase;
	/* chip TID base */
	u32 rcvtidbase;
	/* chip TID count */
	u32 rcvtidcnt;
	/* chip user register base */
	u32 uregbase;
	/* shadow of the chip control register */
	u32 control;

	/* chip address space used by 4k PIO buffers */
	u32 align4k;
	/* size of each eager receive buffer */
	u16 rcvegrbufsize;
	/* log2 of the above */
	u16 rcvegrbufsize_shift;
	/* local bus width from config space */
	u32 lbus_width;
	/* local bus speed in MHz */
	u32 lbus_speed;
	int unit;	/* unit number of this chip */

	/* saved PCI state so it can be restored after a chip reset */
	u32 msi_lo;	/* low portion of MSI capability/vector */
	u32 msi_hi;	/* high portion of MSI capability/vector */
	u16 msi_data;	/* MSI data (vector) */
	u32 pcibar0;	/* BAR0, rewritten after reset */
	u32 pcibar1;	/* BAR1, rewritten after reset */
	u64 rhdrhead_intr_off;

	/* ASCII serial number, from flash */
	u8 serial[16];
	/* human-readable board version */
	u8 boardversion[96];
	u8 lbus_info[32];	/* human-readable local bus info */
	/* chip major revision */
	u8 majrev;
	/* chip minor revision */
	u8 minrev;

	/* number of physical ports available */
	u8 num_pports;
	/* lowest context number usable by user processes */
	u8 first_user_ctxt;
	u8 n_krcv_queues;
	u8 qpn_mask;
	u8 skip_kctxt_mask;

	u16 rhf_offset;	/* offset of RHF within a receive header entry */

	/* GPIO pins for twsi-connected devices, and EEPROM device code */
	u8 gpio_sda_num;
	u8 gpio_scl_num;
	u8 twsi_eeprom_dev;
	u8 board_atten;

	/* EEPROM logging of errors and active time */
	/* controls access to the actual counters and timer */
	spinlock_t eep_st_lock;
	/* controls high-level access to the EEPROM */
	struct mutex eep_lock;
	uint64_t traffic_wds;
	/* active time, kept in seconds but logged in hours */
	atomic_t active_time;
	/* shadow of EEPROM error counts, new since last EEPROM update */
	uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
	uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
	uint16_t eep_hrs;
	/*
	 * Masks for which bits of errs and hwerrs cause each of the
	 * log counters to increment.
	 */
	struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
	struct qib_diag_client *diag_client;
	spinlock_t qib_diag_trans_lock;	/* protects diag observer ops */
	struct diag_observer_list_elt *diag_observer_list;

	/* non-zero if the chip supports the PSXmitWait counter */
	u8 psxmitwait_supported;
	/* PS* counter cycle rate, used when sampling */
	u16 psxmitwait_check_rate;
	/* high-volume overflow errors are deferred to this tasklet */
	struct tasklet_struct error_tasklet;
	/* per-device CQ worker thread */
	struct kthread_worker *worker;

	int assigned_node_id;	/* NUMA node used for allocations */
};
1107
1108
1109#define QIB_HOL_UP 0
1110#define QIB_HOL_INIT 1
1111
1112#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1113#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1114#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1115#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1116#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1117
1118
1119#define TXCHK_CHG_TYPE_DIS1 3
1120#define TXCHK_CHG_TYPE_ENAB1 2
1121#define TXCHK_CHG_TYPE_KERN 1
1122#define TXCHK_CHG_TYPE_USER 0
1123
1124#define QIB_CHASE_TIME msecs_to_jiffies(145)
1125#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1126
1127
/* private data attached to each open of the user character device */
struct qib_filedata {
	struct qib_ctxtdata *rcd;	/* context this file is attached to */
	unsigned subctxt;		/* subcontext number within rcd */
	unsigned tidcursor;		/* per-open TID allocation cursor */
	struct qib_user_sdma_queue *pq;	/* user SDMA queue for this open */
	int rec_cpu_num;		/* for cpu affinity; -1 if none */
};
1135
1136extern struct list_head qib_dev_list;
1137extern spinlock_t qib_devs_lock;
1138extern struct qib_devdata *qib_lookup(int unit);
1139extern u32 qib_cpulist_count;
1140extern unsigned long *qib_cpulist;
1141
1142extern unsigned qib_wc_pat;
1143extern unsigned qib_cc_table_size;
1144int qib_init(struct qib_devdata *, int);
1145int init_chip_wc_pat(struct qib_devdata *dd, u32);
1146int qib_enable_wc(struct qib_devdata *dd);
1147void qib_disable_wc(struct qib_devdata *dd);
1148int qib_count_units(int *npresentp, int *nupp);
1149int qib_count_active_units(void);
1150
1151int qib_cdev_init(int minor, const char *name,
1152 const struct file_operations *fops,
1153 struct cdev **cdevp, struct device **devp);
1154void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
1155int qib_dev_init(void);
1156void qib_dev_cleanup(void);
1157
1158int qib_diag_add(struct qib_devdata *);
1159void qib_diag_remove(struct qib_devdata *);
1160void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
1161void qib_sdma_update_tail(struct qib_pportdata *, u16);
1162
1163int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
1164void qib_bad_intrstatus(struct qib_devdata *);
1165void qib_handle_urcv(struct qib_devdata *, u64);
1166
1167
1168void qib_chip_cleanup(struct qib_devdata *);
1169
1170void qib_chip_done(void);
1171
1172
1173int qib_unordered_wc(void);
1174void qib_pio_copy(void __iomem *to, const void *from, size_t count);
1175
1176void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
1177int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
1178void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
1179void qib_cancel_sends(struct qib_pportdata *);
1180
1181int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
1182int qib_setup_eagerbufs(struct qib_ctxtdata *);
1183void qib_set_ctxtcnt(struct qib_devdata *);
1184int qib_create_ctxts(struct qib_devdata *dd);
1185struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
1186void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
1187void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
1188
1189u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
1190int qib_reset_device(int);
1191int qib_wait_linkstate(struct qib_pportdata *, u32, int);
1192int qib_set_linkstate(struct qib_pportdata *, u8);
1193int qib_set_mtu(struct qib_pportdata *, u16);
1194int qib_set_lid(struct qib_pportdata *, u32, u8);
1195void qib_hol_down(struct qib_pportdata *);
1196void qib_hol_init(struct qib_pportdata *);
1197void qib_hol_up(struct qib_pportdata *);
1198void qib_hol_event(unsigned long);
1199void qib_disable_after_error(struct qib_devdata *);
1200int qib_set_uevent_bits(struct qib_pportdata *, const int);
1201
1202
/*
 * Accessors for the struct qib_filedata hanging off a struct file's
 * private_data in the user character device code.
 */
#define ctxt_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->rcd)
#define subctxt_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->subctxt)
#define tidcursor_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->tidcursor)
#define user_sdma_queue_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->pq)
1211
/* Return the device a port belongs to. */
static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
{
	return ppd->dd;
}
1216
/* Map the embedded verbs device back to its containing qib_devdata. */
static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
{
	return container_of(dev, struct qib_devdata, verbs_dev);
}
1221
/* Map a core ib_device to the qib_devdata that registered it. */
static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}
1226
/* Map the embedded per-port verbs data back to its qib_pportdata. */
static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
{
	return container_of(ibp, struct qib_pportdata, ibport_data);
}
1231
1232static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
1233{
1234 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1235 unsigned pidx = port - 1;
1236
1237 WARN_ON(pidx >= dd->num_pports);
1238 return &dd->pport[pidx].ibport_data;
1239}
1240
1241
1242
1243
1244#define QIB_HAS_LINK_LATENCY 0x1
1245#define QIB_INITTED 0x2
1246#define QIB_DOING_RESET 0x4
1247#define QIB_PRESENT 0x8
1248#define QIB_PIO_FLUSH_WC 0x10
1249#define QIB_HAS_THRESH_UPDATE 0x40
1250#define QIB_HAS_SDMA_TIMEOUT 0x80
1251#define QIB_USE_SPCL_TRIG 0x100
1252#define QIB_NODMA_RTAIL 0x200
1253#define QIB_HAS_INTX 0x800
1254#define QIB_HAS_SEND_DMA 0x1000
1255#define QIB_HAS_VLSUPP 0x2000
1256#define QIB_HAS_HDRSUPP 0x4000
1257#define QIB_BADINTR 0x8000
1258#define QIB_DCA_ENABLED 0x10000
1259#define QIB_HAS_QSFP 0x20000
1260
1261
1262
1263
1264#define QIBL_LINKV 0x1
1265#define QIBL_LINKDOWN 0x8
1266#define QIBL_LINKINIT 0x10
1267#define QIBL_LINKARMED 0x20
1268#define QIBL_LINKACTIVE 0x40
1269
1270#define QIBL_IB_AUTONEG_INPROG 0x1000
1271#define QIBL_IB_AUTONEG_FAILED 0x2000
1272#define QIBL_IB_LINK_DISABLED 0x4000
1273
1274#define QIBL_IB_FORCE_NOTIFY 0x8000
1275
1276
1277#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1278
1279
1280
1281
1282#define QIB_CTXT_WAITING_RCV 2
1283
1284#define QIB_CTXT_MASTER_UNINIT 4
1285
1286#define QIB_CTXT_WAITING_URG 5
1287
1288
1289void qib_free_data(struct qib_ctxtdata *dd);
1290void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
1291 u32, struct qib_ctxtdata *);
1292struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
1293 const struct pci_device_id *);
1294struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
1295 const struct pci_device_id *);
1296struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
1297 const struct pci_device_id *);
1298void qib_free_devdata(struct qib_devdata *);
1299struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
1300
1301#define QIB_TWSI_NO_DEV 0xFF
1302
1303int qib_twsi_reset(struct qib_devdata *dd);
1304int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1305 int len);
1306int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1307 const void *buffer, int len);
1308void qib_get_eeprom_info(struct qib_devdata *);
1309int qib_update_eeprom_log(struct qib_devdata *dd);
1310void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1311void qib_dump_lookup_output_queue(struct qib_devdata *);
1312void qib_force_pio_avail_update(struct qib_devdata *);
1313void qib_clear_symerror_on_linkup(unsigned long opaque);
1314
1315
1316
1317
1318
1319
1320#define QIB_LED_PHYS 1
1321#define QIB_LED_LOG 2
1322void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1323
1324
1325int qib_setup_sdma(struct qib_pportdata *);
1326void qib_teardown_sdma(struct qib_pportdata *);
1327void __qib_sdma_intr(struct qib_pportdata *);
1328void qib_sdma_intr(struct qib_pportdata *);
1329int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
1330 u32, struct qib_verbs_txreq *);
1331
1332int qib_sdma_make_progress(struct qib_pportdata *dd);
1333
1334static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
1335{
1336 return ppd->sdma_descq_added == ppd->sdma_descq_removed;
1337}
1338
1339
1340static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
1341{
1342 return ppd->sdma_descq_cnt -
1343 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
1344}
1345
1346static inline int __qib_sdma_running(struct qib_pportdata *ppd)
1347{
1348 return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
1349}
1350int qib_sdma_running(struct qib_pportdata *);
1351void dump_sdma_state(struct qib_pportdata *ppd);
1352void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1353void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1354
1355
1356
1357
1358#define QIB_DFLT_RCVHDRSIZE 9
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371#define QIB_RCVHDR_ENTSIZE 32
1372
1373int qib_get_user_pages(unsigned long, size_t, struct page **);
1374void qib_release_user_pages(struct page **, size_t);
1375int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
1376int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
1377u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
1378void qib_sendbuf_done(struct qib_devdata *, unsigned);
1379
1380static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
1381{
1382 *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
1383}
1384
1385static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
1386{
1387
1388
1389
1390
1391 return (u32) le64_to_cpu(
1392 *((volatile __le64 *)rcd->rcvhdrtail_kvaddr));
1393}
1394
1395static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
1396{
1397 const struct qib_devdata *dd = rcd->dd;
1398 u32 hdrqtail;
1399
1400 if (dd->flags & QIB_NODMA_RTAIL) {
1401 __le32 *rhf_addr;
1402 u32 seq;
1403
1404 rhf_addr = (__le32 *) rcd->rcvhdrq +
1405 rcd->head + dd->rhf_offset;
1406 seq = qib_hdrget_seq(rhf_addr);
1407 hdrqtail = rcd->head;
1408 if (seq == rcd->seq_cnt)
1409 hdrqtail++;
1410 } else
1411 hdrqtail = qib_get_rcvhdrtail(rcd);
1412
1413 return hdrqtail;
1414}
1415
1416
1417
1418
1419
1420extern const char ib_qib_version[];
1421
1422int qib_device_create(struct qib_devdata *);
1423void qib_device_remove(struct qib_devdata *);
1424
1425int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
1426 struct kobject *kobj);
1427int qib_verbs_register_sysfs(struct qib_devdata *);
1428void qib_verbs_unregister_sysfs(struct qib_devdata *);
1429
1430extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
1431
1432int __init qib_init_qibfs(void);
1433int __exit qib_exit_qibfs(void);
1434
1435int qibfs_add(struct qib_devdata *);
1436int qibfs_remove(struct qib_devdata *);
1437
1438int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
1439int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
1440 const struct pci_device_id *);
1441void qib_pcie_ddcleanup(struct qib_devdata *);
1442int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
1443int qib_reinit_intr(struct qib_devdata *);
1444void qib_enable_intx(struct pci_dev *);
1445void qib_nomsi(struct qib_devdata *);
1446void qib_nomsix(struct qib_devdata *);
1447void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
1448void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
1449
1450
1451
1452
1453dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
1454 size_t, int);
1455const char *qib_get_unit_name(int unit);
1456
1457
1458
1459
1460
/*
 * Flush any pending write-combining stores before the next MMIO PIO
 * access: sfence on x86-64, a generic write barrier elsewhere.
 */
#if defined(CONFIG_X86_64)
#define qib_flush_wc() asm volatile("sfence" : : : "memory")
#else
#define qib_flush_wc() wmb() /* no reorder around wc flush */
#endif
1466
1467
1468extern unsigned qib_ibmtu;
1469extern ushort qib_cfgctxts;
1470extern ushort qib_num_cfg_vls;
1471extern ushort qib_mini_init;
1472extern unsigned qib_n_krcv_queues;
1473extern unsigned qib_sdma_fetch_arb;
1474extern unsigned qib_compat_ddr_negotiate;
1475extern int qib_special_trigger;
1476extern unsigned qib_numa_aware;
1477
1478extern struct mutex qib_mutex;
1479
1480
1481#define STATUS_TIMEOUT 60
1482
1483#define QIB_DRV_NAME "ib_qib"
1484#define QIB_USER_MINOR_BASE 0
1485#define QIB_TRACE_MINOR 127
1486#define QIB_DIAGPKT_MINOR 128
1487#define QIB_DIAG_MINOR_BASE 129
1488#define QIB_NMINORS 255
1489
1490#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1491#define PCI_VENDOR_ID_QLOGIC 0x1077
1492#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1493#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1494#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
/*
 * dev_err/dev_warn/dev_info wrappers.  qib_early_err is for use before
 * the qib_devdata is set up; the dd-based variants prefix messages with
 * the unit name (and, for porterr, the unit and port numbers) so they
 * identify the device.
 */
#define qib_early_err(dev, fmt, ...) \
	dev_err(dev, fmt, ##__VA_ARGS__)

#define qib_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)

#define qib_dev_warn(dd, fmt, ...) \
	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
		qib_get_unit_name((dd)->unit), ##__VA_ARGS__)

#define qib_dev_porterr(dd, port, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
		qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
		##__VA_ARGS__)

#define qib_devinfo(pcidev, fmt, ...) \
	dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
1523
1524
1525
1526
/*
 * Maps a hardware-error status bit mask to a human-readable message;
 * tables of these are consumed by qib_format_hwerrors() below.
 */
struct qib_hwerror_msgs {
	u64 mask;		/* error bit(s) this entry describes */
	const char *msg;	/* message text for those bits */
	size_t sz;
};
1532
1533#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1534
1535
1536void qib_format_hwerrors(u64 hwerrs,
1537 const struct qib_hwerror_msgs *hwerrmsgs,
1538 size_t nhwerrmsgs, char *msg, size_t lmsg);
1539#endif
1540