1#ifndef _QIB_KERNEL_H
2#define _QIB_KERNEL_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/list.h>
47#include <linux/scatterlist.h>
48#include <linux/slab.h>
49#include <linux/io.h>
50#include <linux/fs.h>
51#include <linux/completion.h>
52#include <linux/kref.h>
53#include <linux/sched.h>
54#include <linux/kthread.h>
55#include <rdma/ib_hdrs.h>
56#include <rdma/rdma_vt.h>
57
58#include "qib_common.h"
59#include "qib_verbs.h"
60
61
62#define QIB_CHIP_VERS_MAJ 2U
63
64
65#define QIB_CHIP_VERS_MIN 0U
66
67
68#define QIB_OUI 0x001175
69#define QIB_OUI_LSB 40
70
71
72
73
74
75
76
77
78
/*
 * Driver-wide statistics, exported through the qib_stats global declared
 * below and summed/reported by the counter code elsewhere in the driver.
 */
struct qlogic_ib_stats {
	__u64 sps_ints;		/* number of interrupts handled */
	__u64 sps_errints;	/* number of error interrupts */
	__u64 sps_txerrs;	/* number of errors in send procedure */
	__u64 sps_rcverrs;	/* number of errors caught in receive */
	__u64 sps_hwerrs;	/* number of hardware errors reported */
	__u64 sps_nopiobufs;	/* no pio bufs avail from kernel */
	__u64 sps_ctxts;	/* number of contexts currently open */
	__u64 sps_lenerrs;	/* number of kernel packets with bad length */
	__u64 sps_buffull;	/* eager buffer full events (rcv drop) */
	__u64 sps_hdrfull;	/* header queue full events (rcv drop) */
};
91
92extern struct qlogic_ib_stats qib_stats;
93extern const struct pci_error_handlers qib_pci_err_handler;
94
95#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
96
97
98
99
100
101
102#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
103
104
105
106
107
108#ifdef CONFIG_DEBUG_FS
109struct qib_opcode_stats_perctx;
110#endif
111
/*
 * Per receive-context state.  One instance exists for each kernel or
 * user receive context; allocated by qib_create_ctxtdata() and freed by
 * qib_free_ctxtdata() (prototypes below).
 */
struct qib_ctxtdata {
	/* kernel virtual addresses of the eager receive buffer chunks */
	void **rcvegrbuf;
	/* DMA addresses of the eager receive buffer chunks */
	dma_addr_t *rcvegrbuf_phys;
	/* receive header queue base */
	void *rcvhdrq;
	/* kernel virtual address where hdrqtail is DMA'ed to */
	void *rcvhdrtail_kvaddr;
	/*
	 * Buffer for expected-TID page lists; presumably allocated once at
	 * open to avoid per-call allocation — confirm against the file ops.
	 */
	void *tid_pg_list;
	/* event bits pending for/notified to user contexts */
	unsigned long *user_event_mask;
	/* waitqueue head for processes sleeping on rcv or pioavail */
	wait_queue_head_t wait;
	/* DMA address associated with the eager buffers */
	dma_addr_t rcvegr_phys;
	/* DMA address of the receive header queue */
	dma_addr_t rcvhdrq_phys;
	/* DMA address of the hdrqtail update location */
	dma_addr_t rcvhdrqtailaddr_phys;
	/* number of opens on this context (including slave sub-contexts) */
	int cnt;
	/* context number of this instance */
	unsigned ctxt;
	/* NUMA node for this context's allocations */
	int node_id;
	/* non-zero if context is shared; number of sub-contexts */
	u16 subctxt_cnt;
	/* id of this sub-context, when shared */
	u16 subctxt_id;
	/* number of eager TID entries for this context */
	u16 rcvegrcnt;
	/* index of first eager TID entry */
	u16 rcvegr_tid_base;
	/* number of PIO buffers for this context */
	u32 piocnt;
	/* first PIO buffer for this context */
	u32 pio_base;
	/* chip offset of the context's PIO buffers */
	u32 piobufs;
	/* how many chunks make up rcvegrbuf above */
	u32 rcvegrbuf_chunks;
	/* how many eager buffers per chunk */
	u16 rcvegrbufs_perchunk;
	/* ilog2 of rcvegrbufs_perchunk, for fast index math */
	u16 rcvegrbufs_perchunk_shift;
	/* size of each eager buffer chunk (for freeing) */
	size_t rcvegrbuf_size;
	/* size of the receive header queue (for freeing) */
	size_t rcvhdrq_size;
	/* per-context state flags (QIB_CTXT_* bits below) */
	unsigned long flag;
	/* next expected TID to check when looking for a free one */
	u32 tidcursor;
	/* count of rcv waits that timed out without an interrupt */
	u32 rcvwait_to;
	/* count of PIO waits that timed out without an interrupt */
	u32 piowait_to;
	/* count of rcv events that arrived before a wait was needed */
	u32 rcvnowait;
	/* count of PIO events that arrived before a wait was needed */
	u32 pionowait;
	/* total number of urgent packets seen */
	u32 urgent;
	/* saved urgent count, for poll() edge triggering */
	u32 urgent_poll;
	/* pid of process using this context */
	pid_t pid;
	/* pids of the slave sub-context users */
	pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
	/* command name of the opening process (task_struct .comm size) */
	char comm[16];
	/* pkeys set by this use of this context */
	u16 pkeys[4];
	/* back pointer so file ops can reach the device */
	struct qib_devdata *dd;
	/* back pointer to the physical port */
	struct qib_pportdata *ppd;
	/* shared-context copy of the user register page(s) */
	void *subctxt_uregbase;
	/* shared-context eager receive buffers */
	void *subctxt_rcvegrbuf;
	/* shared-context receive header queue base */
	void *subctxt_rcvhdr_base;
	/* version of the library that opened this context */
	u32 userversion;
	/* bitmask of active slave sub-contexts */
	u32 active_slaves;
	/* type of packets or conditions we want to poll for */
	u16 poll_type;
	/* receive packet sequence counter */
	u8 seq_cnt;
	/* sequence counter used when redirecting packets */
	u8 redirect_seq_cnt;
	/* receive header queue head offset */
	u32 head;
	/* QPs waiting for this context to make progress */
	struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
	/* verbs opcode stats per context */
	struct qib_opcode_stats_perctx *opstats;
#endif
};
229
230struct rvt_sge_state;
231
/*
 * One send-DMA transmit request, tracked on the port's sdma_activelist
 * until completion.
 */
struct qib_sdma_txreq {
	int flags;		/* QIB_SDMA_TXREQ_F_* bits below */
	int sg_count;		/* number of mapped scatter/gather entries */
	dma_addr_t addr;	/* DMA address of the mapped buffer */
	/* completion callback; second argument is a QIB_SDMA_TXREQ_S_* status */
	void (*callback)(struct qib_sdma_txreq *, int);
	u16 start_idx;		/* sdma private: first descriptor index */
	u16 next_descq_idx;	/* sdma private: index past last descriptor */
	struct list_head list;	/* sdma private: activelist linkage */
};
241
/* One hardware send-DMA descriptor: two little-endian 64-bit quadwords. */
struct qib_sdma_desc {
	__le64 qw[2];
};
245
/* Verbs-layer wrapper around an sdma txreq for a single verbs send. */
struct qib_verbs_txreq {
	struct qib_sdma_txreq txreq;	/* embedded sdma request */
	struct rvt_qp *qp;		/* QP this send belongs to */
	struct rvt_swqe *wqe;		/* work request being sent */
	u32 dwords;			/* payload length in dwords */
	u16 hdr_dwords;			/* header length in dwords */
	u16 hdr_inx;			/* header buffer index */
	struct qib_pio_header *align_buf;	/* bounce buffer, when used */
	struct rvt_mregion *mr;		/* memory region reference to drop */
	struct rvt_sge_state *ss;	/* SGE state for the payload */
};
257
258#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
259#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
260#define QIB_SDMA_TXREQ_F_INTREQ 0x4
261#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
262#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
263
264#define QIB_SDMA_TXREQ_S_OK 0
265#define QIB_SDMA_TXREQ_S_SENDERROR 1
266#define QIB_SDMA_TXREQ_S_ABORTED 2
267#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
268
269
270
271
272
273
274#define QIB_IB_CFG_LIDLMC 0
275#define QIB_IB_CFG_LWID_ENB 2
276#define QIB_IB_CFG_LWID 3
277#define QIB_IB_CFG_SPD_ENB 4
278#define QIB_IB_CFG_SPD 5
279#define QIB_IB_CFG_RXPOL_ENB 6
280#define QIB_IB_CFG_LREV_ENB 7
281#define QIB_IB_CFG_LINKLATENCY 8
282#define QIB_IB_CFG_HRTBT 9
283#define QIB_IB_CFG_OP_VLS 10
284#define QIB_IB_CFG_VL_HIGH_CAP 11
285#define QIB_IB_CFG_VL_LOW_CAP 12
286#define QIB_IB_CFG_OVERRUN_THRESH 13
287#define QIB_IB_CFG_PHYERR_THRESH 14
288#define QIB_IB_CFG_LINKDEFAULT 15
289#define QIB_IB_CFG_PKEYS 16
290#define QIB_IB_CFG_MTU 17
291#define QIB_IB_CFG_LSTATE 18
292#define QIB_IB_CFG_VL_HIGH_LIMIT 19
293#define QIB_IB_CFG_PMA_TICKS 20
294#define QIB_IB_CFG_PORT 21
295
296
297
298
299
300
301#define IB_LINKCMD_DOWN (0 << 16)
302#define IB_LINKCMD_ARMED (1 << 16)
303#define IB_LINKCMD_ACTIVE (2 << 16)
304#define IB_LINKINITCMD_NOP 0
305#define IB_LINKINITCMD_POLL 1
306#define IB_LINKINITCMD_SLEEP 2
307#define IB_LINKINITCMD_DISABLE 3
308
309
310
311
312#define QIB_IB_LINKDOWN 0
313#define QIB_IB_LINKARM 1
314#define QIB_IB_LINKACTIVE 2
315#define QIB_IB_LINKDOWN_ONLY 3
316#define QIB_IB_LINKDOWN_SLEEP 4
317#define QIB_IB_LINKDOWN_DISABLE 5
318
319
320
321
322
323
324
325
326#define QIB_IB_SDR 1
327#define QIB_IB_DDR 2
328#define QIB_IB_QDR 4
329
330#define QIB_DEFAULT_MTU 4096
331
332
333#define QIB_MAX_IB_PORTS 2
334
335
336
337
338#define QIB_IB_TBL_VL_HIGH_ARB 1
339#define QIB_IB_TBL_VL_LOW_ARB 2
340
341
342
343
344
345
346#define QIB_RCVCTRL_TAILUPD_ENB 0x01
347#define QIB_RCVCTRL_TAILUPD_DIS 0x02
348#define QIB_RCVCTRL_CTXT_ENB 0x04
349#define QIB_RCVCTRL_CTXT_DIS 0x08
350#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
351#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
352#define QIB_RCVCTRL_PKEY_ENB 0x40
353#define QIB_RCVCTRL_PKEY_DIS 0x80
354#define QIB_RCVCTRL_BP_ENB 0x0100
355#define QIB_RCVCTRL_BP_DIS 0x0200
356#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
357#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
358
359
360
361
362
363
364
365
366#define QIB_SENDCTRL_DISARM (0x1000)
367#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
368
369#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
370#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
371#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
372#define QIB_SENDCTRL_SEND_DIS (0x20000)
373#define QIB_SENDCTRL_SEND_ENB (0x40000)
374#define QIB_SENDCTRL_FLUSH (0x80000)
375#define QIB_SENDCTRL_CLEAR (0x100000)
376#define QIB_SENDCTRL_DISARM_ALL (0x200000)
377
378
379
380
381
382
383
384
385#define QIBPORTCNTR_PKTSEND 0U
386#define QIBPORTCNTR_WORDSEND 1U
387#define QIBPORTCNTR_PSXMITDATA 2U
388#define QIBPORTCNTR_PSXMITPKTS 3U
389#define QIBPORTCNTR_PSXMITWAIT 4U
390#define QIBPORTCNTR_SENDSTALL 5U
391
392#define QIBPORTCNTR_PKTRCV 6U
393#define QIBPORTCNTR_PSRCVDATA 7U
394#define QIBPORTCNTR_PSRCVPKTS 8U
395#define QIBPORTCNTR_RCVEBP 9U
396#define QIBPORTCNTR_RCVOVFL 10U
397#define QIBPORTCNTR_WORDRCV 11U
398
399#define QIBPORTCNTR_RXLOCALPHYERR 12U
400#define QIBPORTCNTR_RXVLERR 13U
401#define QIBPORTCNTR_ERRICRC 14U
402#define QIBPORTCNTR_ERRVCRC 15U
403#define QIBPORTCNTR_ERRLPCRC 16U
404#define QIBPORTCNTR_BADFORMAT 17U
405#define QIBPORTCNTR_ERR_RLEN 18U
406#define QIBPORTCNTR_IBSYMBOLERR 19U
407#define QIBPORTCNTR_INVALIDRLEN 20U
408#define QIBPORTCNTR_UNSUPVL 21U
409#define QIBPORTCNTR_EXCESSBUFOVFL 22U
410#define QIBPORTCNTR_ERRLINK 23U
411#define QIBPORTCNTR_IBLINKDOWN 24U
412#define QIBPORTCNTR_IBLINKERRRECOV 25U
413#define QIBPORTCNTR_LLI 26U
414
415#define QIBPORTCNTR_RXDROPPKT 27U
416#define QIBPORTCNTR_VL15PKTDROP 28U
417#define QIBPORTCNTR_ERRPKEY 29U
418#define QIBPORTCNTR_KHDROVFL 30U
419
420#define QIBPORTCNTR_PSINTERVAL 31U
421#define QIBPORTCNTR_PSSTART 32U
422#define QIBPORTCNTR_PSSTAT 33U
423
424
425#define ACTIVITY_TIMER 5
426
427#define MAX_NAME_SIZE 64
428
429#ifdef CONFIG_INFINIBAND_QIB_DCA
430struct qib_irq_notify;
431#endif
432
/* Per-MSI-X-vector bookkeeping. */
struct qib_msix_entry {
	int irq;		/* Linux IRQ number for this vector */
	void *arg;		/* argument passed to the handler */
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int dca;		/* non-zero if DCA applies to this vector */
	int rcv;		/* non-zero if this is a receive vector */
	struct qib_irq_notify *notifier;	/* IRQ affinity notifier */
#endif
	char name[MAX_NAME_SIZE];	/* name shown in /proc/interrupts */
	cpumask_var_t mask;		/* CPU affinity mask for the vector */
};
444
445
446
447
448
449
/*
 * Opaque chip-specific state, defined in the per-chip source files.
 * Note: the per-port tag is "chippport" ("chip" + "pport"), matching the
 * cpspec member of struct qib_pportdata below; the previous declaration
 * misspelled it "chipport", leaving the used tag undeclared at file scope.
 */
struct qib_chip_specific;
struct qib_chippport_specific;
452
/*
 * States of the send-DMA state machine.  The numeric prefixes order the
 * states from fully down (s00) to fully running (s99).
 */
enum qib_sdma_states {
	qib_sdma_state_s00_hw_down,
	qib_sdma_state_s10_hw_start_up_wait,
	qib_sdma_state_s20_idle,
	qib_sdma_state_s30_sw_clean_up_wait,
	qib_sdma_state_s40_hw_clean_up_wait,
	qib_sdma_state_s50_hw_halt_wait,
	qib_sdma_state_s99_running,
};
462
/*
 * Events fed into the send-DMA state machine via
 * qib_sdma_process_event()/__qib_sdma_process_event() (declared below).
 * The e7220/e7322 events are chip-specific error-halt events.
 */
enum qib_sdma_events {
	qib_sdma_event_e00_go_hw_down,
	qib_sdma_event_e10_go_hw_start,
	qib_sdma_event_e20_hw_started,
	qib_sdma_event_e30_go_running,
	qib_sdma_event_e40_sw_cleaned,
	qib_sdma_event_e50_hw_cleaned,
	qib_sdma_event_e60_hw_halted,
	qib_sdma_event_e70_go_idle,
	qib_sdma_event_e7220_err_halted,
	qib_sdma_event_e7322_err_halted,
	qib_sdma_event_e90_timer_tick,
};
476
477extern char *qib_sdma_state_names[];
478extern char *qib_sdma_event_names[];
479
/*
 * Per-state action flags for the send-DMA state machine; one entry per
 * state, consulted when entering that state (see set_state_action in
 * struct qib_sdma_state).
 */
struct sdma_set_state_action {
	unsigned op_enable:1;		/* enable the engine in this state */
	unsigned op_intenable:1;	/* enable sdma interrupts */
	unsigned op_halt:1;		/* halt the engine */
	unsigned op_drain:1;		/* drain outstanding descriptors */
	unsigned go_s99_running_tofalse:1;	/* clear go_s99_running */
	unsigned go_s99_running_totrue:1;	/* set go_s99_running */
};
488
/* Send-DMA state machine state, embedded in struct qib_pportdata. */
struct qib_sdma_state {
	struct kref kref;		/* reference count for teardown */
	struct completion comp;		/* signaled when last ref is dropped */
	enum qib_sdma_states current_state;
	struct sdma_set_state_action *set_state_action;	/* per-state actions */
	unsigned current_op;		/* QIB_SDMA_SENDCTRL_OP_* in effect */
	unsigned go_s99_running;	/* want to reach the running state */
	unsigned first_sendbuf;		/* first send buffer used by sdma */
	unsigned last_sendbuf;		/* one past the last sdma send buffer */

	/* debugging / development */
	enum qib_sdma_states previous_state;
	unsigned previous_op;
	enum qib_sdma_events last_event;
};
503
/*
 * Congestion statistics sampling state (PortSamplesControl-style
 * counters); cong_stats in struct qib_pportdata.
 */
struct xmit_wait {
	struct timer_list timer;	/* periodic sampling timer */
	u64 counter;			/* accumulated xmit-wait count */
	u8 flags;			/* sampling state flags */
	struct cache {
		/* last-read values of the hardware sample counters */
		u64 psxmitdata;
		u64 psrcvdata;
		u64 psxmitpkts;
		u64 psrcvpkts;
		u64 psxmitwait;
	} counter_cache;
};
516
517
518
519
520
521
522
/*
 * Per-IB-port state.  The device (struct qib_devdata) has num_pports of
 * these; initialized by qib_init_pportdata() (prototype below).
 */
struct qib_pportdata {
	struct qib_ibport ibport_data;	/* verbs per-port data; must be first
					 * for ppd_from_ibp()'s container_of */

	struct qib_devdata *dd;		/* owning device */
	struct qib_chippport_specific *cpspec;	/* chip-specific per-port state */
	struct kobject pport_kobj;	/* sysfs objects for this port */
	struct kobject pport_cc_kobj;
	struct kobject sl2vl_kobj;
	struct kobject diagc_kobj;

	/* GUID for this port, in network byte order */
	__be64 guid;

	/* QIBL_* link state flags (defined below), protected by lflags_lock */
	u32 lflags;
	/* link states the driver is waiting for (see state_wait) */
	u32 state_wanted;
	spinlock_t lflags_lock;

	/* reference counts for the entries in the pkeys table */
	atomic_t pkeyrefs[4];

	/*
	 * This address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want.
	 */
	u64 *statusp;

	/* SendDMA related entries */

	struct qib_sdma_desc *sdma_descq;	/* descriptor queue */
	struct workqueue_struct *qib_wq;	/* per-port workqueue */
	struct qib_sdma_state sdma_state;	/* sdma state machine */
	dma_addr_t sdma_descq_phys;		/* DMA address of descq */
	volatile __le64 *sdma_head_dma;		/* DMA'ed head pointer */
	dma_addr_t sdma_head_phys;
	u16 sdma_descq_cnt;			/* number of descriptors */

	/* read mostly above; this group is written under sdma_lock */
	spinlock_t sdma_lock ____cacheline_aligned_in_smp;
	struct list_head sdma_activelist;	/* in-flight txreqs */
	struct list_head sdma_userpending;	/* queued user sdma work */
	u64 sdma_descq_added;			/* total descriptors queued */
	u64 sdma_descq_removed;			/* total descriptors retired */
	u16 sdma_descq_tail;
	u16 sdma_descq_head;
	u8 sdma_generation;
	u8 sdma_intrequest;

	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;

	wait_queue_head_t state_wait;	/* wakeups for state_wanted */

	/* HoL (head-of-line) blocking workaround state */
	unsigned hol_state;		/* QIB_HOL_* below */
	struct timer_list hol_timer;

	/* last IBC status read, for detecting changes */
	u64 lastibcstat;

	/* shadow copies of per-port registers, protected elsewhere */
	unsigned long p_rcvctrl;	/* shadow per-port rcvctrl */
	unsigned long p_sendctrl;	/* shadow per-port sendctrl */

	u32 ibmtu;		/* the MTU programmed for this port */
	/*
	 * The max size IB packet (with IB and chip headers) this port can
	 * receive; changes when ibmtu changes.
	 */
	u32 ibmaxlen;
	/*
	 * ibmaxlen at init time, limited by chip and receive buffer sizes;
	 * not changed at run time.
	 */
	u32 init_ibmaxlen;

	u16 lid;		/* LID programmed for this port */

	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[4];

	u8 lmc;			/* LID mask control */
	u8 link_width_supported;
	u8 link_speed_supported;
	u8 link_width_enabled;
	u8 link_speed_enabled;
	u8 link_width_active;
	u8 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;
	/* receive polarity inversion setting */
	u8 rx_pol_inv;

	u8 hw_pidx;		/* physical port index within the chip */
	u8 port;		/* IB port number (1-based) */

	u8 delay_mult;		/* delay multiplier; used in timing — confirm
				 * units against the chip files */

	/* LED override (diagnostic blink) state, driven by the timer below */
	u8 led_override;
	u16 led_override_timeoff;	/* delta to next timer event */
	u8 led_override_vals[2];	/* values to alternate between */
	u8 led_override_phase;		/* which of the two values is active */
	atomic_t led_override_timer_active;
	/* timer for the LED blink pattern */
	struct timer_list led_override_timer;
	struct xmit_wait cong_stats;	/* congestion statistics sampling */
	struct timer_list symerr_clear_timer;	/* see
				 * qib_clear_symerror_on_linkup() below */

	/* protects the congestion-control shadow tables below */
	spinlock_t cc_shadow_lock
		____cacheline_aligned_in_smp;

	/* shadow of the CCA table entries, for restoring */
	struct cc_table_shadow *ccti_entries_shadow;

	/* shadow of the CCA congestion settings */
	struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;

	/* congestion-control table entries in use */
	struct ib_cc_table_entry_shadow *ccti_entries;

	/* congestion entries in use, indexed by SL */
	struct ib_cc_congestion_entry_shadow *congestion_entries;

	/* number of congestion-control table entries the port supports */
	u16 cc_supported_table_entries;

	/* total number of congestion-control table entries */
	u16 total_cct_entry;

	/* bitmap of SLs with congestion control enabled */
	u16 cc_sl_control_map;

	/* upper bound of the valid CCT indices */
	u16 ccti_limit;

	/* maximum number of congestion entries */
	u8 cc_max_table_entries;
};
685
686
687
688
689
690
691
692
struct diag_observer;

/*
 * Hook called when a diag client reads or writes a chip register range
 * being observed.  @offs is the register offset, @data points to the
 * value read/written, @mask selects the relevant bits, and @only_32 is
 * non-zero for 32-bit accesses.
 */
typedef int (*diag_hook) (struct qib_devdata *dd,
	const struct diag_observer *op,
	u32 offs, u64 *data, u64 mask, int only_32);

/* An observer watching the register range [bottom, top]. */
struct diag_observer {
	diag_hook hook;
	u32 bottom;
	u32 top;
};
704
705extern int qib_register_observer(struct qib_devdata *dd,
706 const struct diag_observer *op);
707
708
709struct diag_observer_list_elt;
710
711
712
713
714
715
/*
 * Per-device state: one instance per QLogic IB HCA, allocated by
 * qib_alloc_devdata() and freed by qib_free_devdata() (prototypes below).
 * The f_* members are chip-specific operations filled in by the per-chip
 * qib_init_iba*_funcs() initializers.
 */
struct qib_devdata {
	struct qib_ibdev verbs_dev;	/* verbs device; must be first for
					 * dd_from_dev()'s container_of */
	struct list_head list;		/* linkage on qib_dev_list */

	/* PCI and character devices */
	struct pci_dev *pcidev;
	struct cdev *user_cdev;
	struct cdev *diag_cdev;
	struct device *user_device;
	struct device *diag_device;

	/* mem-mapped pointer to base of chip regs */
	u64 __iomem *kregbase;
	/* end of mem-mapped chip space, excluding sendbuf and user regs */
	u64 __iomem *kregend;
	/* physical address of chip for io_remap, etc. */
	resource_size_t physaddr;

	/* receive context data array, indexed by context number */
	struct qib_ctxtdata **rcd;

	/* ports and chip-specific device data */
	struct qib_pportdata *pport;	/* array of num_pports entries */
	struct qib_chip_specific *cspec;	/* chip-specific state */

	/* kvirt address of 1st 2k pio buffer */
	void __iomem *pio2kbase;
	/* kvirt address of 1st 4k pio buffer */
	void __iomem *pio4kbase;
	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/* mem-mapped pointer to base of user chip regs, if mapped */
	u64 __iomem *userbase;
	void __iomem *piovl15base;	/* base of VL15 buffers, if present */

	/*
	 * points to area where PIOavail registers are DMA'ed by the chip;
	 * read via the volatile pointer, never written by the driver
	 */
	volatile __le64 *pioavailregs_dma;
	/* DMA address of the pioavail area */
	dma_addr_t pioavailregs_phys;

	/*
	 * Chip-specific function pointers, set up by the per-chip init code.
	 */
	/* fallback to a different interrupt mode */
	int (*f_intr_fallback)(struct qib_devdata *);
	/* reset the chip */
	int (*f_reset)(struct qib_devdata *);
	void (*f_quiet_serdes)(struct qib_pportdata *);
	int (*f_bringup_serdes)(struct qib_pportdata *);
	int (*f_early_init)(struct qib_devdata *);
	void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
	void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
				u32, unsigned long);
	void (*f_cleanup)(struct qib_devdata *);
	void (*f_setextled)(struct qib_pportdata *, u32);
	/* fill in chip-specific fields in qib_base_info for a context */
	int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
	/* free irq */
	void (*f_free_irq)(struct qib_devdata *);
	struct qib_message_header *(*f_get_msgheader)
					(struct qib_devdata *, __le32 *);
	void (*f_config_ctxts)(struct qib_devdata *);
	int (*f_get_ib_cfg)(struct qib_pportdata *, int);
	int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
	int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
	int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
	int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
	u32 (*f_iblink_state)(u64);
	u8 (*f_ibphys_portstate)(u64);
	void (*f_xgxs_reset)(struct qib_pportdata *);
	/* per chip actions needed for IB Link up/down changes */
	int (*f_ib_updown)(struct qib_pportdata *, int, u64);
	u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
	/* Read/modify/write of GPIO pins (potentially chip-specific) */
	int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
		u32 mask);
	/* Enable writes to config EEPROM (if supported) */
	int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
	/*
	 * modify rcvctrl shadow[s] and write to appropriate chip-regs.
	 * Use QIB_RCVCTRL_* bits (above) in "op"; "ctxt" is a context
	 * number or -1 for all.
	 */
	void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
		int ctxt);
	/* Read/modify/write sendctrl appropriately for op and port. */
	void (*f_sendctrl)(struct qib_pportdata *, u32 op);
	void (*f_set_intr_state)(struct qib_devdata *, u32);
	void (*f_set_armlaunch)(struct qib_devdata *, u32);
	void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
	int (*f_late_initreg)(struct qib_devdata *);
	int (*f_init_sdma_regs)(struct qib_pportdata *);
	u16 (*f_sdma_gethead)(struct qib_pportdata *);
	int (*f_sdma_busy)(struct qib_pportdata *);
	void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
	void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
	void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
	void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
	void (*f_sdma_init_early)(struct qib_pportdata *);
	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
	u64 (*f_portcntr)(struct qib_pportdata *, u32);
	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
		u64 **);
	u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
		char **, u64 **);
	u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
	void (*f_initvl15_bufs)(struct qib_devdata *);
	void (*f_init_ctxt)(struct qib_ctxtdata *);
	void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
		struct qib_ctxtdata *);
	void (*f_writescratch)(struct qib_devdata *, u32);
	int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
#ifdef CONFIG_INFINIBAND_QIB_DCA
	int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
#endif

	char *boardname;	/* human-readable board name */

	/* template for writing TIDs */
	u64 tidtemplate;
	/* value to write to free TIDs */
	u64 tidinvalid;

	/* number of registers used for pioavail */
	u32 pioavregs;
	/* device (not port) flags; QIB_* bits defined below */
	u32 flags;
	/* last buffer for user use */
	u32 lastctxt_piobuf;

	/* saturating counter of (non-port-specific) device interrupts */
	u64 z_int_counter;	/* base value at last zeroing */
	/* percpu interrupt counter */
	u64 __percpu *int_counter;

	/* pio bufs allocated per ctxt */
	u32 pbufsctxt;
	/* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
	u32 ctxts_extrabuf;
	/*
	 * number of ctxts configured as max; zero is set to number chip
	 * supports, less gives more pio bufs/ctxt, etc.
	 */
	u32 cfgctxts;
	/*
	 * number of ctxts available for PSM open
	 */
	u32 freectxts;

	/*
	 * hint that we should update pioavailshadow before
	 * looking for a PIO buffer
	 */
	u32 upd_pio_shadow;

	/* internal debugging statistics */
	u32 maxpkts_call;
	u32 avgpkts_call;
	u64 nopiobufs;

	/* PCI vendor ID (here for NodeInfo) */
	u16 vendorid;
	/* PCI device ID (here for NodeInfo) */
	u16 deviceid;
	/* write-combining MTRR/PAT state for the PIO buffers */
	unsigned long wc_cookie;
	unsigned long wc_base;
	unsigned long wc_len;

	/* shadow copy of struct page *'s for expected sends */
	struct page **pageshadow;
	/* shadow copy of DMA handles for expected sends */
	dma_addr_t *physshadow;
	u64 __iomem *egrtidbase;	/* chip base of eager TID array */
	spinlock_t sendctrl_lock;	/* protect changes to sendctrl shadow */
	/* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
	spinlock_t uctxt_lock;

	/*
	 * per unit status; see also portdata statusp
	 * mapped read-only into user processes so they can get unit and
	 * IB link status cheaply
	 */
	u64 *devstatusp;
	char *freezemsg;	/* freeze msg if hw error put chip in freeze */
	u32 freezelen;		/* max length of freezemsg */
	/* timer used to prevent stats overflow, error throttling, etc. */
	struct timer_list stats_timer;

	/* timer to verify interrupts work, and fallback if possible */
	struct timer_list intrchk_timer;
	unsigned long ureg_align;	/* user register alignment */

	/*
	 * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
	 * pio_writing.
	 */
	spinlock_t pioavail_lock;
	/*
	 * index of last buffer to optimize search for next
	 */
	u32 last_pio;
	/*
	 * min kernel pio buffer to optimize search
	 */
	u32 min_kernel_pio;

	/*
	 * Shadow copies of registers; size indicates read access size.
	 * Most of them are readonly, but some are write-only register,
	 * where we manipulate the bits in the shadow copy, and then write
	 * the shadow copy to the chip.
	 *
	 * pioavailshadow is a software shadow of the chip's pioavail
	 * registers, updated when the hardware DMAs new values; two bits
	 * per send buffer.
	 */
	unsigned long pioavailshadow[6];
	/* bitmap of send buffers available for the kernel to use with PIO */
	unsigned long pioavailkernel[6];
	/* bitmap of send buffers which need to be disarmed */
	unsigned long pio_need_disarm[3];
	/* bitmap of send buffers which are being written to */
	unsigned long pio_writing[3];
	/* kr_revision shadow */
	u64 revision;
	/* Base GUID for device (from eeprom, network order) */
	__be64 base_guid;

	/*
	 * kr_sendpiobufbase value (chip offset of pio buffers), and the
	 * base of the 2KB buffer s(user processes only use 2K)
	 */
	u64 piobufbase;
	u32 pio2k_bufbase;

	/* these are the "32 bit" regs */

	/* number of GUIDs in the flash for this interface */
	u32 nguid;
	/*
	 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
	 * all expect bit fields to be "unsigned long"
	 */
	unsigned long rcvctrl;	/* shadow per device rcvctrl */
	unsigned long sendctrl;	/* shadow per device sendctrl */

	/* value we put in kr_rcvhdrcnt */
	u32 rcvhdrcnt;
	/* value we put in kr_rcvhdrsize */
	u32 rcvhdrsize;
	/* value we put in kr_rcvhdrentsize */
	u32 rcvhdrentsize;
	/* kr_ctxtcnt value */
	u32 ctxtcnt;
	/* kr_pagealign value */
	u32 palign;
	/* number of "2KB" PIO buffers */
	u32 piobcnt2k;
	/* size in bytes of "2KB" PIO buffers */
	u32 piosize2k;
	/* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
	u32 piosize2kmax_dwords;
	/* number of "4KB" PIO buffers */
	u32 piobcnt4k;
	/* size in bytes of "4KB" PIO buffers */
	u32 piosize4k;
	/* kr_rcvegrbase value */
	u32 rcvegrbase;
	/* kr_rcvtidbase value */
	u32 rcvtidbase;
	/* kr_rcvtidcnt value */
	u32 rcvtidcnt;
	/* kr_userregbase */
	u32 uregbase;
	/* shadow the control register contents */
	u32 control;

	/* chip address space used by 4k pio buffers */
	u32 align4k;
	/* size of each rcvegrbuffer */
	u16 rcvegrbufsize;
	/* log2 of above */
	u16 rcvegrbufsize_shift;
	/* localbus width (1, 2,4,8,16,32) from config space  */
	u32 lbus_width;
	/* localbus speed in MHz */
	u32 lbus_speed;
	int unit;	/* unit number of this chip */

	/* start of CHIP_SPEC move to chipspec, but need code changes */
	/* low and high portions of MSI capability/vector */
	u32 msi_lo;
	/* saved after PCIe init for restore after reset */
	u32 msi_hi;
	/* MSI data (vector) saved for restore */
	u16 msi_data;
	/* so we can rewrite it after a chip reset */
	u32 pcibar0;
	/* so we can rewrite it after a chip reset */
	u32 pcibar1;
	u64 rhdrhead_intr_off;	/* rcvhdrhead interrupt enable offset */

	/*
	 * ASCII serial number, from flash, large enough for original
	 * all digit strings, and longer QLogic serial-number format
	 */
	u8 serial[16];
	/* human readable board version */
	u8 boardversion[96];
	u8 lbus_info[32];	/* human readable localbus info */
	/* chip major rev, from qib_revision */
	u8 majrev;
	/* chip minor rev, from qib_revision */
	u8 minrev;

	/* Misc small ints */
	/* Number of physical ports available */
	u8 num_pports;
	/* Lowest context number which can be used by user processes */
	u8 first_user_ctxt;
	u8 n_krcv_queues;	/* number of kernel receive queues per port */
	u8 qpn_mask;		/* mask of QPN bits used for context steering */
	u8 skip_kctxt_mask;	/* kernel contexts to skip — confirm usage */

	u16 rhf_offset;	/* offset of RHF within receive header entry */

	/*
	 * GPIO pins for twsi-connected devices, and device code for eeprom
	 */
	u8 gpio_sda_num;
	u8 gpio_scl_num;
	u8 twsi_eeprom_dev;
	u8 board_atten;

	/* Support (including locks) for EEPROM logging of errors and time */
	/* control access to actual counters, timer */
	spinlock_t eep_st_lock;
	/* control high-level access to EEPROM */
	struct mutex eep_lock;
	uint64_t traffic_wds;	/* FLASH copy maintained in the driver */

	struct qib_diag_client *diag_client;
	spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
	struct diag_observer_list_elt *diag_observer_list;

	u8 psxmitwait_supported;
	/* cycle length of PS* counters in HW (in picoseconds) */
	u16 psxmitwait_check_rate;
	/* high volume overflow errors deferred to tasklet */
	struct tasklet_struct error_tasklet;
	/* per device cq worker */
	int assigned_node_id;	/* NUMA node closest to HCA */
};
1084
1085
1086#define QIB_HOL_UP 0
1087#define QIB_HOL_INIT 1
1088
1089#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1090#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1091#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1092#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1093#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1094
1095
1096#define TXCHK_CHG_TYPE_DIS1 3
1097#define TXCHK_CHG_TYPE_ENAB1 2
1098#define TXCHK_CHG_TYPE_KERN 1
1099#define TXCHK_CHG_TYPE_USER 0
1100
1101#define QIB_CHASE_TIME msecs_to_jiffies(145)
1102#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1103
1104
/* Private data for each open() of the user char device (file->private_data). */
struct qib_filedata {
	struct qib_ctxtdata *rcd;	/* context owned by this open */
	unsigned subctxt;		/* sub-context of this open, if shared */
	unsigned tidcursor;		/* per-open expected-TID cursor */
	struct qib_user_sdma_queue *pq;	/* per-open user sdma queue */
	int rec_cpu_num;		/* cpu reserved for this open, or -1 */
};
1112
1113extern struct list_head qib_dev_list;
1114extern spinlock_t qib_devs_lock;
1115extern struct qib_devdata *qib_lookup(int unit);
1116extern u32 qib_cpulist_count;
1117extern unsigned long *qib_cpulist;
1118
1119extern unsigned qib_wc_pat;
1120extern unsigned qib_cc_table_size;
1121
1122int qib_init(struct qib_devdata *, int);
1123int init_chip_wc_pat(struct qib_devdata *dd, u32);
1124int qib_enable_wc(struct qib_devdata *dd);
1125void qib_disable_wc(struct qib_devdata *dd);
1126int qib_count_units(int *npresentp, int *nupp);
1127int qib_count_active_units(void);
1128
1129int qib_cdev_init(int minor, const char *name,
1130 const struct file_operations *fops,
1131 struct cdev **cdevp, struct device **devp);
1132void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
1133int qib_dev_init(void);
1134void qib_dev_cleanup(void);
1135
1136int qib_diag_add(struct qib_devdata *);
1137void qib_diag_remove(struct qib_devdata *);
1138void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
1139void qib_sdma_update_tail(struct qib_pportdata *, u16);
1140
1141int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
1142void qib_bad_intrstatus(struct qib_devdata *);
1143void qib_handle_urcv(struct qib_devdata *, u64);
1144
1145
1146void qib_chip_cleanup(struct qib_devdata *);
1147
1148void qib_chip_done(void);
1149
1150
1151int qib_unordered_wc(void);
1152void qib_pio_copy(void __iomem *to, const void *from, size_t count);
1153
1154void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
1155int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
1156void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
1157void qib_cancel_sends(struct qib_pportdata *);
1158
1159int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
1160int qib_setup_eagerbufs(struct qib_ctxtdata *);
1161void qib_set_ctxtcnt(struct qib_devdata *);
1162int qib_create_ctxts(struct qib_devdata *dd);
1163struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
1164int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
1165void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
1166
1167u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
1168int qib_reset_device(int);
1169int qib_wait_linkstate(struct qib_pportdata *, u32, int);
1170int qib_set_linkstate(struct qib_pportdata *, u8);
1171int qib_set_mtu(struct qib_pportdata *, u16);
1172int qib_set_lid(struct qib_pportdata *, u32, u8);
1173void qib_hol_down(struct qib_pportdata *);
1174void qib_hol_init(struct qib_pportdata *);
1175void qib_hol_up(struct qib_pportdata *);
1176void qib_hol_event(struct timer_list *);
1177void qib_disable_after_error(struct qib_devdata *);
1178int qib_set_uevent_bits(struct qib_pportdata *, const int);
1179
1180
1181#define ctxt_fp(fp) \
1182 (((struct qib_filedata *)(fp)->private_data)->rcd)
1183#define subctxt_fp(fp) \
1184 (((struct qib_filedata *)(fp)->private_data)->subctxt)
1185#define tidcursor_fp(fp) \
1186 (((struct qib_filedata *)(fp)->private_data)->tidcursor)
1187#define user_sdma_queue_fp(fp) \
1188 (((struct qib_filedata *)(fp)->private_data)->pq)
1189
1190static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
1191{
1192 return ppd->dd;
1193}
1194
/*
 * Map an embedded verbs device back to its containing qib_devdata;
 * relies on verbs_dev being a member of struct qib_devdata.
 */
static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
{
	return container_of(dev, struct qib_devdata, verbs_dev);
}
1199
/* Map an ib_device to its qib_devdata via the verbs device (to_idev). */
static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}
1204
/*
 * Map an embedded verbs port back to its containing qib_pportdata;
 * relies on ibport_data being a member of struct qib_pportdata.
 */
static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
{
	return container_of(ibp, struct qib_pportdata, ibport_data);
}
1209
1210static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
1211{
1212 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1213 unsigned pidx = port - 1;
1214
1215 WARN_ON(pidx >= dd->num_pports);
1216 return &dd->pport[pidx].ibport_data;
1217}
1218
1219
1220
1221
1222#define QIB_HAS_LINK_LATENCY 0x1
1223#define QIB_INITTED 0x2
1224#define QIB_DOING_RESET 0x4
1225#define QIB_PRESENT 0x8
1226#define QIB_PIO_FLUSH_WC 0x10
1227#define QIB_HAS_THRESH_UPDATE 0x40
1228#define QIB_HAS_SDMA_TIMEOUT 0x80
1229#define QIB_USE_SPCL_TRIG 0x100
1230#define QIB_NODMA_RTAIL 0x200
1231#define QIB_HAS_INTX 0x800
1232#define QIB_HAS_SEND_DMA 0x1000
1233#define QIB_HAS_VLSUPP 0x2000
1234#define QIB_HAS_HDRSUPP 0x4000
1235#define QIB_BADINTR 0x8000
1236#define QIB_DCA_ENABLED 0x10000
1237#define QIB_HAS_QSFP 0x20000
1238#define QIB_SHUTDOWN 0x40000
1239
1240
1241
1242
1243#define QIBL_LINKV 0x1
1244#define QIBL_LINKDOWN 0x8
1245#define QIBL_LINKINIT 0x10
1246#define QIBL_LINKARMED 0x20
1247#define QIBL_LINKACTIVE 0x40
1248
1249#define QIBL_IB_AUTONEG_INPROG 0x1000
1250#define QIBL_IB_AUTONEG_FAILED 0x2000
1251#define QIBL_IB_LINK_DISABLED 0x4000
1252
1253#define QIBL_IB_FORCE_NOTIFY 0x8000
1254
1255
1256#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1257
1258
1259
1260
1261#define QIB_CTXT_WAITING_RCV 2
1262
1263#define QIB_CTXT_MASTER_UNINIT 4
1264
1265#define QIB_CTXT_WAITING_URG 5
1266
1267
1268void qib_free_data(struct qib_ctxtdata *dd);
1269void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
1270 u32, struct qib_ctxtdata *);
1271struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
1272 const struct pci_device_id *);
1273struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
1274 const struct pci_device_id *);
1275struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
1276 const struct pci_device_id *);
1277void qib_free_devdata(struct qib_devdata *);
1278struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
1279
1280#define QIB_TWSI_NO_DEV 0xFF
1281
1282int qib_twsi_reset(struct qib_devdata *dd);
1283int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1284 int len);
1285int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1286 const void *buffer, int len);
1287void qib_get_eeprom_info(struct qib_devdata *);
1288void qib_dump_lookup_output_queue(struct qib_devdata *);
1289void qib_force_pio_avail_update(struct qib_devdata *);
1290void qib_clear_symerror_on_linkup(struct timer_list *t);
1291
1292
1293
1294
1295
1296
1297#define QIB_LED_PHYS 1
1298#define QIB_LED_LOG 2
1299void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1300
1301
1302int qib_setup_sdma(struct qib_pportdata *);
1303void qib_teardown_sdma(struct qib_pportdata *);
1304void __qib_sdma_intr(struct qib_pportdata *);
1305void qib_sdma_intr(struct qib_pportdata *);
1306void qib_user_sdma_send_desc(struct qib_pportdata *dd,
1307 struct list_head *pktlist);
1308int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
1309 u32, struct qib_verbs_txreq *);
1310
1311int qib_sdma_make_progress(struct qib_pportdata *dd);
1312
1313static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
1314{
1315 return ppd->sdma_descq_added == ppd->sdma_descq_removed;
1316}
1317
1318
1319static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
1320{
1321 return ppd->sdma_descq_cnt -
1322 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
1323}
1324
1325static inline int __qib_sdma_running(struct qib_pportdata *ppd)
1326{
1327 return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
1328}
1329int qib_sdma_running(struct qib_pportdata *);
1330void dump_sdma_state(struct qib_pportdata *ppd);
1331void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1332void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1333
1334
1335
1336
1337#define QIB_DFLT_RCVHDRSIZE 9
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350#define QIB_RCVHDR_ENTSIZE 32
1351
1352int qib_get_user_pages(unsigned long, size_t, struct page **);
1353void qib_release_user_pages(struct page **, size_t);
1354int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
1355int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
1356u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
1357void qib_sendbuf_done(struct qib_devdata *, unsigned);
1358
/*
 * Zero the DMA'ed receive-header-queue tail location for @rcd.
 * The location is written by the chip, so this resets the software view.
 */
static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
}
1363
/*
 * Read the receive-header-queue tail that the chip DMAs to host memory.
 * The volatile access forces a fresh read each call, since the chip
 * updates the location asynchronously; the value fits in 32 bits.
 */
static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	return (u32) le64_to_cpu(
		*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
}
1373
1374static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
1375{
1376 const struct qib_devdata *dd = rcd->dd;
1377 u32 hdrqtail;
1378
1379 if (dd->flags & QIB_NODMA_RTAIL) {
1380 __le32 *rhf_addr;
1381 u32 seq;
1382
1383 rhf_addr = (__le32 *) rcd->rcvhdrq +
1384 rcd->head + dd->rhf_offset;
1385 seq = qib_hdrget_seq(rhf_addr);
1386 hdrqtail = rcd->head;
1387 if (seq == rcd->seq_cnt)
1388 hdrqtail++;
1389 } else
1390 hdrqtail = qib_get_rcvhdrtail(rcd);
1391
1392 return hdrqtail;
1393}
1394
1395
1396
1397
1398
1399extern const char ib_qib_version[];
1400extern const struct attribute_group qib_attr_group;
1401
1402int qib_device_create(struct qib_devdata *);
1403void qib_device_remove(struct qib_devdata *);
1404
1405int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
1406 struct kobject *kobj);
1407void qib_verbs_unregister_sysfs(struct qib_devdata *);
1408
1409extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
1410
1411int __init qib_init_qibfs(void);
1412int __exit qib_exit_qibfs(void);
1413
1414int qibfs_add(struct qib_devdata *);
1415int qibfs_remove(struct qib_devdata *);
1416
1417int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
1418int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
1419 const struct pci_device_id *);
1420void qib_pcie_ddcleanup(struct qib_devdata *);
1421int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
1422int qib_reinit_intr(struct qib_devdata *);
1423void qib_enable_intx(struct qib_devdata *dd);
1424void qib_nomsi(struct qib_devdata *);
1425void qib_nomsix(struct qib_devdata *);
1426void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
1427void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
1428
1429u64 qib_int_counter(struct qib_devdata *);
1430
1431u64 qib_sps_ints(void);
1432
1433
1434
1435
1436int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
1437struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
1438
1439
1440
1441
1442
/*
 * Flush write combining.  On x86_64 an sfence suffices to push out
 * write-combined stores to the PIO buffers; elsewhere fall back to the
 * generic write memory barrier.
 */
static inline void qib_flush_wc(void)
{
#if defined(CONFIG_X86_64)
	asm volatile("sfence" : : : "memory");
#else
	wmb(); /* no reorder around wc flush */
#endif
}
1451
1452
1453extern unsigned qib_ibmtu;
1454extern ushort qib_cfgctxts;
1455extern ushort qib_num_cfg_vls;
1456extern ushort qib_mini_init;
1457extern unsigned qib_n_krcv_queues;
1458extern unsigned qib_sdma_fetch_arb;
1459extern unsigned qib_compat_ddr_negotiate;
1460extern int qib_special_trigger;
1461extern unsigned qib_numa_aware;
1462
1463extern struct mutex qib_mutex;
1464
1465
1466#define STATUS_TIMEOUT 60
1467
1468#define QIB_DRV_NAME "ib_qib"
1469#define QIB_USER_MINOR_BASE 0
1470#define QIB_TRACE_MINOR 127
1471#define QIB_DIAGPKT_MINOR 128
1472#define QIB_DIAG_MINOR_BASE 129
1473#define QIB_NMINORS 255
1474
1475#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1476#define PCI_VENDOR_ID_QLOGIC 0x1077
1477#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1478#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1479#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490#define qib_early_err(dev, fmt, ...) \
1491 dev_err(dev, fmt, ##__VA_ARGS__)
1492
1493#define qib_dev_err(dd, fmt, ...) \
1494 dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
1495 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
1496
1497#define qib_dev_warn(dd, fmt, ...) \
1498 dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
1499 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
1500
1501#define qib_dev_porterr(dd, port, fmt, ...) \
1502 dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
1503 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
1504 ##__VA_ARGS__)
1505
1506#define qib_devinfo(pcidev, fmt, ...) \
1507 dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
1508
1509
1510
1511
/*
 * One entry of a hardware-error message table, consumed by
 * qib_format_hwerrors() below: if (hwerrs & mask), append msg.
 */
struct qib_hwerror_msgs {
	u64 mask;		/* error bit(s) this entry describes */
	const char *msg;	/* human-readable description */
	size_t sz;		/* length of msg; may be 0 — see the
				 * QLOGIC_IB_HWE_MSG initializer below */
};
1517
1518#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1519
1520
1521void qib_format_hwerrors(u64 hwerrs,
1522 const struct qib_hwerror_msgs *hwerrmsgs,
1523 size_t nhwerrmsgs, char *msg, size_t lmsg);
1524
1525void qib_stop_send_queue(struct rvt_qp *qp);
1526void qib_quiesce_qp(struct rvt_qp *qp);
1527void qib_flush_qp_waiters(struct rvt_qp *qp);
1528int qib_mtu_to_path_mtu(u32 mtu);
1529u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
1530void qib_notify_error_qp(struct rvt_qp *qp);
1531int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
1532 struct ib_qp_attr *attr);
1533
1534#endif
1535