#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
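/*
 * QPNMAP_ENTRIES is the number of bitmap pages needed to cover QPN_MAX
 * QP numbers at one bit each; e.g. with 4 KiB pages,
 * 2^24 / 4096 / 8 = 512 entries.
 */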

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
        (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

/* Flag bits in the BTH first dword */
#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

/* Not yet defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5
static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}
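/*
 * Note: the IB_VL_* values above are the PortInfo VLCap/OperationalVLs
 * encodings, not VL counts; qib_num_vls() converts an encoding to the
 * number of virtual lanes it represents (e.g. IB_VL_VL0_3 means VLs
 * 0..3, so it returns 4, and IB_VL_VL0_14 means VLs 0..14, so 15).
 * Unknown encodings fall back to a single VL.
 */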
struct ib_reth {
        __be64 vaddr;
        __be32 rkey;
        __be32 length;
} __packed;

struct ib_atomic_eth {
        __be32 vaddr[2];        /* unaligned, so access as two 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
} __packed;
struct qib_other_headers {
        __be32 bth[3];
        union {
                struct {
                        __be32 deth[2];
                        __be32 imm_data;
                } ud;
                struct {
                        struct ib_reth reth;
                        __be32 imm_data;
                } rc;
                struct {
                        __be32 aeth;
                        __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
                struct ib_atomic_eth atomic_eth;
        } u;
} __packed;
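/*
 * These structs overlay the wire format after the 8-byte LRH (and
 * optional 40-byte GRH): a 12-byte BTH followed by the opcode-specific
 * extension headers, e.g. a 16-byte RETH for RDMA requests or an
 * 8-byte DETH for UD packets.
 */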
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __packed;

struct qib_pio_header {
        __le32 pbc[2];          /* PIO buffer control word precedes the headers */
        struct qib_ib_header hdr;
} __packed;

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct qib_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/* Protection domain */
struct qib_pd {
        struct ib_pd ibpd;
        int user;               /* non-zero if created from user space */
};

/* Address handle */
struct qib_ah {
        struct ib_ah ibah;
        struct ib_ah_attr attr;
        atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct qib_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;               /* index of next entry to fill */
        u32 tail;               /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};
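/*
 * head == tail means the queue is empty; the queue is allocated with
 * one extra slot (ibcq.cqe + 1) so that a full queue can be
 * distinguished from an empty one.
 */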

/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct kthread_work comptask;
        struct qib_devdata *dd;
        spinlock_t lock;        /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * Used by the verbs layer.
 */
struct qib_seg {
        void *vaddr;
        size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ     (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
        struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
        struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
        u64 user_base;          /* User's address for this region */
        u64 iova;               /* IB start address of this region */
        size_t length;
        u32 lkey;
        u32 offset;             /* offset (bytes) to start of region */
        int access_flags;
        u32 max_segs;           /* number of qib_segs in all the arrays */
        u32 mapsz;              /* size of the map array */
        u8  page_shift;         /* 0 = non-uniform or non-power-of-2 sizes */
        u8  lkey_published;     /* in global table */
        struct completion comp; /* complete when refcount goes to zero */
        struct rcu_head list;
        atomic_t refcount;
        struct qib_segarray *map[0];    /* the segments */
};
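/*
 * map[] is a flexible two-level array: map[m] points to a page-sized
 * qib_segarray and map[m]->segs[n] is an individual segment, so segment
 * index i is addressed as map[i / QIB_SEGSZ]->segs[i % QIB_SEGSZ] (the
 * m/n fields of struct qib_sge below cache such a position).
 */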

struct qib_sge {
        struct qib_mregion *mr;
        void *vaddr;            /* kernel virtual address of segment */
        u32 sge_length;         /* length of the SGE */
        u32 length;             /* remaining length of the segment */
        u16 m;                  /* current index: mr->map[m] */
        u16 n;                  /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and
 * stored in qp->s_max_sge.
 */
struct qib_swqe {
        struct ib_send_wr wr;   /* don't use wr.sg_list */
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct qib_rwq {
        u32 head;               /* new work requests posted to the head */
        u32 tail;               /* receives pull requests from here */
        struct qib_rwqe wq[0];
};

struct qib_rq {
        struct qib_rwq *wq;
        u32 size;               /* size of RWQE array */
        u8 max_sge;
        spinlock_t lock /* protect changes in this struct */
                ____cacheline_aligned_in_smp;
};

struct qib_srq {
        struct ib_srq ibsrq;
        struct qib_rq rq;
        struct qib_mmap_info *ip;
        /* send signal when number of RWQEs < limit */
        u32 limit;
};

struct qib_sge_state {
        struct qib_sge *sg_list;        /* next SGE to be used if any */
        struct qib_sge sge;             /* progress state for the current SGE */
        u32 total_len;
        u8 num_sge;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct qib_ack_entry {
        u8 opcode;
        u8 sent;
        u32 psn;
        u32 lpsn;
        union {
                struct qib_sge rdma_sge;
                u64 atomic_data;
        };
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or changing the QP 'state'.
 */
struct qib_qp {
        struct ib_qp ibqp;
        /* read-mostly fields below */
        struct ib_ah_attr remote_ah_attr;
        struct ib_ah_attr alt_ah_attr;
        struct qib_qp __rcu *next;      /* link list for QPN hash table */
        struct qib_swqe *s_wq;          /* send work queue */
        struct qib_mmap_info *ip;
        struct qib_ib_header *s_hdr;    /* next packet header to send */
        unsigned long timeout_jiffies;  /* computed from timeout */
        enum ib_mtu path_mtu;
        u32 remote_qpn;
        u32 pmtu;               /* decoded from path_mtu */
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */

        u8 state;               /* QP state */
        u8 qp_access_flags;
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 timeout;             /* Timeout for this QP */
        u8 s_srate;
        u8 s_mig_state;
        u8 port_num;
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_draining;

        /* start of read/write fields */

        atomic_t refcount ____cacheline_aligned_in_smp;
        wait_queue_head_t wait;

        struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
                ____cacheline_aligned_in_smp;
        struct qib_sge_state s_rdma_read_sge;
        spinlock_t r_lock ____cacheline_aligned_in_smp;  /* used for APM */
        unsigned long r_aflags;
        u64 r_wr_id;            /* ID for current receive WQE */
        u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
        u32 r_len;              /* total length of r_sge */
        u32 r_rcv_len;          /* receive data len processed */
        u32 r_psn;              /* expected rcv packet sequence number */
        u32 r_msn;              /* message sequence number */

        u8 r_state;             /* opcode of last packet received */
        u8 r_flags;
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */

        struct list_head rspwait;       /* link for waiting to respond */

        struct qib_sge_state r_sge;     /* current receive data */
        struct qib_rq r_rq;             /* receive work queue */
        spinlock_t s_lock ____cacheline_aligned_in_smp;
        struct qib_sge_state *s_cur_sge;
        u32 s_flags;
        struct qib_verbs_txreq *s_tx;
        struct qib_swqe *s_wqe;
        struct qib_sge_state s_sge;     /* current send request data */
        struct qib_mregion *s_rdma_mr;
        atomic_t s_dma_busy;
        u32 s_cur_size;         /* size of send packet in bytes */
        u32 s_len;              /* total length of s_sge */
        u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
        u32 s_next_psn;         /* PSN for next request */
        u32 s_last_psn;         /* last response PSN processed */
        u32 s_sending_psn;      /* lowest PSN that is being sent */
        u32 s_sending_hpsn;     /* highest PSN that is being sent */
        u32 s_psn;              /* current packet sequence number */
        u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
        u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
        u32 s_head;             /* new entries added here */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_ssn;              /* SSN of tail entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        u16 s_hdrwords;         /* size of s_hdr in 32-bit words */
        u16 s_rdma_ack_cnt;
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */

        struct qib_sge_state s_ack_rdma_sge;
        struct timer_list s_timer;
        struct list_head iowait;        /* link for wait PIO buf */

        struct work_struct s_work;

        wait_queue_head_t wait_dma;

        struct qib_sge r_sg_list[0]     /* verified SGEs */
                ____cacheline_aligned_in_smp;
};

/*
 * Bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
 *
 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * QIB_S_BUSY - send tasklet is processing the QP
 * QIB_S_TIMER - the RC retry timer is active
 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * QIB_S_WAIT_RNR - waiting for RNR timeout
 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 */
#define QIB_S_SIGNAL_REQ_WR     0x0001
#define QIB_S_BUSY              0x0002
#define QIB_S_TIMER             0x0004
#define QIB_S_RESP_PENDING      0x0008
#define QIB_S_ACK_PENDING       0x0010
#define QIB_S_WAIT_FENCE        0x0020
#define QIB_S_WAIT_RDMAR        0x0040
#define QIB_S_WAIT_RNR          0x0080
#define QIB_S_WAIT_SSN_CREDIT   0x0100
#define QIB_S_WAIT_DMA          0x0200
#define QIB_S_WAIT_PIO          0x0400
#define QIB_S_WAIT_TX           0x0800
#define QIB_S_WAIT_DMA_DESC     0x1000
#define QIB_S_WAIT_KMEM         0x2000
#define QIB_S_WAIT_PSN          0x4000
#define QIB_S_WAIT_ACK          0x8000
#define QIB_S_SEND_ONE          0x10000
#define QIB_S_UNLIMITED_CREDIT  0x20000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
        QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
        QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
        QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)

#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)

#define QIB_PSN_CREDIT  16

/*
 * Since struct qib_swqe is not a fixed size, we can't simply index into
 * struct qib_qp.s_wq.  This function does the array index computation.
 */
static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
                                            unsigned n)
{
        return (struct qib_swqe *)((char *)qp->s_wq +
                                   (sizeof(struct qib_swqe) +
                                    qp->s_max_sge *
                                    sizeof(struct qib_sge)) * n);
}
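/*
 * Each slot is a struct qib_swqe followed immediately by its in-line
 * scatter/gather list, so the per-slot stride is
 * sizeof(struct qib_swqe) + s_max_sge * sizeof(struct qib_sge);
 * entry n lives n strides past the base of s_wq.
 */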

/*
 * Since struct qib_rwqe is not a fixed size, we can't simply index into
 * struct qib_rq.wq.  This function does the array index computation.
 */
static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
{
        return (struct qib_rwqe *)
                ((char *) rq->wq->wq +
                 (sizeof(struct qib_rwqe) +
                  rq->max_sge * sizeof(struct ib_sge)) * n);
}
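/*
 * Same stride computation as get_swqe_ptr(), but receive WQEs carry
 * struct ib_sge entries and use the receive queue's max_sge.
 */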

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated.  This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock;        /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;               /* mask for QPN mapping */
        /* bit map of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
        spinlock_t lock;        /* protect changes in this struct */
        u32 next;               /* next unused index (speeds search) */
        u32 gen;                /* generation count */
        u32 max;                /* size of the table */
        struct qib_mregion __rcu **table;
};

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_opcode_stats_perctx {
        struct qib_opcode_stats stats[128];
};

struct qib_pma_counters {
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
};

struct qib_ibport {
        struct qib_qp __rcu *qp0;
        struct qib_qp __rcu *qp1;
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct qib_ah *sm_ah;   /* cached sm_ah for VL15 */
        struct qib_ah *smi_ah;  /* cached smi_ah */
        struct rb_root mcast_tree;
        spinlock_t lock;        /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        u64 tid;                /* TID for traps */
        struct qib_pma_counters __percpu *pmastats;
        /* z_* fields are the baseline counts for the PMA */
        u64 z_unicast_xmit;
        u64 z_unicast_rcv;
        u64 z_multicast_xmit;
        u64 z_multicast_rcv;
        u64 z_symbol_error_counter;
        u64 z_link_error_recovery_counter;
        u64 z_link_downed_counter;
        u64 z_port_rcv_errors;
        u64 z_port_rcv_remphys_errors;
        u64 z_port_xmit_discards;
        u64 z_port_xmit_data;
        u64 z_port_rcv_data;
        u64 z_port_xmit_packets;
        u64 z_port_rcv_packets;
        u32 z_local_link_integrity_errors;
        u32 z_excessive_buffer_overrun_errors;
        u32 z_vl15_dropped;
        u32 n_rc_resends;
        u32 n_rc_acks;
        u32 n_rc_qacks;
        u32 n_rc_delayed_comp;
        u32 n_seq_naks;
        u32 n_rdma_seq;
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_loop_pkts;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_rc_timeouts;
        u32 n_dmawait;
        u32 n_unaligned;
        u32 n_rc_dupreq;
        u32 n_rc_seqnak;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;
        u16 mkey_lease_period;
        u16 sm_lid;
        u16 repress_traps;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;
        u8 sl_to_vl[16];
};

struct qib_ibdev {
        struct ib_device ibdev;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock;    /* protect mmap_offset */
        u32 mmap_offset;
        struct qib_mregion __rcu *dma_mr;       /* MR for the DMA lkey */

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct qib_lkey_table lk_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct qib_qp __rcu **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;

        spinlock_t pending_lock;        /* protect wait lists, PMA counters, etc. */
        u32 qp_table_size;      /* size of the hash table */
        u32 qp_rnd;             /* random bytes for hash */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_pds_allocated;    /* number of PDs allocated for device */
        spinlock_t n_pds_lock;
        u32 n_ahs_allocated;    /* number of AHs allocated for device */
        spinlock_t n_ahs_lock;
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated;     /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
        /* per HCA debugfs */
        struct dentry *qib_ibdev_dbg;
#endif
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * a RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp);

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
        u16 p1 = pkey1 & 0x7FFF;
        u16 p2 = pkey2 & 0x7FFF;

        /*
         * Low 15 bits must be non-zero and match, and at least one of
         * the two keys must have the full-membership bit (bit 15) set.
         */
        return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
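/*
 * For example, qib_pkey_ok(0xFFFF, 0x7FFF) is true (a full member paired
 * with a limited member of the same partition), while
 * qib_pkey_ok(0x7FFF, 0x7FFF) is false (two limited members may not
 * communicate) and qib_pkey_ok(0x8001, 0x8002) is false (different
 * partitions).  The (__s16) casts test bit 15, the membership bit.
 */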

void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
                   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct qib_ibport *ibp);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                    struct ib_wc *in_wc, struct ib_grh *in_grh,
                    struct ib_mad *in_mad, struct ib_mad *out_mad);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int qib_cmp24(u32 a, u32 b)
{
        return (((int) a) - ((int) b)) << 8;
}
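/*
 * Shifting the 32-bit difference left by 8 discards the high byte, so
 * only the low 24 bits (the PSN width) participate and wraparound is
 * handled; e.g. qib_cmp24(0x000001, 0xFFFFFF) > 0 because the 24-bit
 * difference is 2, i.e. PSN 1 is "after" PSN 0xFFFFFF modulo 2^24.
 */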

struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
                          u64 *rwords, u64 *spkts, u64 *rpkts,
                          u64 *xmit_wait);

int qib_get_counters(struct qib_pportdata *ppd,
                     struct qib_verbs_counters *cntrs);

int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int qib_mcast_tree_empty(struct qib_ibport *ibp);

__be32 qib_compute_aeth(struct qib_qp *qp);

struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);

struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata);

int qib_destroy_qp(struct ib_qp *ibqp);

int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);

int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata);

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned qib_free_all_qps(struct qib_devdata *dd);

void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);

void qib_free_qpn_table(struct qib_qpn_table *qpt);
#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter;

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);

int qib_qp_iter_next(struct qib_qp_iter *iter);

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);

#endif

void qib_get_credit(struct qib_qp *qp, u32 aeth);

unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);

void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);

void qib_put_txreq(struct qib_verbs_txreq *tx);

int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
                   u32 hdrwords, struct qib_sge_state *ss, u32 len);

void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
                  int release);

void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);

void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);

struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);

void qib_rc_rnr_retry(unsigned long arg);

void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);

void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);

int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);

void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct qib_qp *qp);

int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);

void qib_free_lkey(struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
                struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

int qib_cq_init(struct qib_devdata *dd);

void qib_cq_exit(struct qib_devdata *dd);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
                            int comp_vector, struct ib_ucontext *context,
                            struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
                              struct ib_phys_buf *buffer_list,
                              int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
                                struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                             struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);
static inline void qib_get_mr(struct qib_mregion *mr)
{
        atomic_inc(&mr->refcount);
}

void mr_rcu_callback(struct rcu_head *list);

static inline void qib_put_mr(struct qib_mregion *mr)
{
        if (unlikely(atomic_dec_and_test(&mr->refcount)))
                call_rcu(&mr->list, mr_rcu_callback);
}
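/*
 * Dropping the last reference defers the actual free to mr_rcu_callback()
 * via call_rcu(), so lockless readers that found the region under
 * rcu_read_lock() (e.g. through lk_table or dma_mr, both __rcu) can
 * finish before the memory goes away.
 */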

static inline void qib_put_ss(struct qib_sge_state *ss)
{
        while (ss->num_sge) {
                qib_put_mr(ss->sge.mr);
                if (--ss->num_sge)
                        ss->sge = *ss->sg_list++;
        }
}
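/*
 * Releases the MR reference held by each remaining SGE in the work
 * state: the current SGE is always ss->sge, and the rest are pulled
 * from ss->sg_list as num_sge counts down to zero.
 */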

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status);

void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);
extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * HCA-independent IB physical port states, returned by the
 * f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif                          /* QIB_VERBS_H */