1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

/* ASCII tag "hi06" identifying the first hardware generation. */
#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')

#define MAC_ADDR_OCTET_NUM	6
#define HNS_ROCE_MAX_MSG_LEN	0x80000000

/* Round @a up to a multiple of @b (name is a misspelling of "ALIGN_UP"). */
#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

#define HNS_ROCE_IB_MIN_SQ_STRIDE 6

#define HNS_ROCE_BA_SIZE	(32 * 4096)

/* Minimum queue depths accepted when creating CQs and QPs. */
#define HNS_ROCE_MIN_CQE_NUM	0x40
#define HNS_ROCE_MIN_WQE_NUM	0x20

/* Limits of the hardware memory translation tables (MTPT/PBL). */
#define HNS_ROCE_MAX_INNER_MTPT_NUM	0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM	0x100000

/* Poll interval and retry bound (5000 ms total) while freeing a CQ. */
#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT	0x2
#define HNS_ROCE_MIN_CQE_CNT		16

#define HNS_ROCE_MAX_IRQ_NUM		128

#define EQ_ENABLE			1
#define EQ_DISABLE			0

/* Event queue kinds: completion EQ and asynchronous EQ. */
#define HNS_ROCE_CEQ			0
#define HNS_ROCE_AEQ			1

#define HNS_ROCE_CEQ_ENTRY_SIZE		0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE		0x10

/* Bit layout of the packed sl/tclass/flow-label word in hns_roce_av. */
#define HNS_ROCE_SL_SHIFT		28
#define HNS_ROCE_TCLASS_SHIFT		20
#define HNS_ROCE_FLOW_LABLE_MASK	0xfffff	/* sic: "LABLE" == label */

#define HNS_ROCE_MAX_PORTS		6
#define HNS_ROCE_MAX_GID_NUM		16
#define HNS_ROCE_GID_SIZE		16

/* NOTE(review): 0xff appears to be the "zero hop" sentinel — confirm vs HW spec. */
#define HNS_ROCE_HOP_NUM_0		0xff

/* Round-robin policy flags passed to hns_roce_bitmap_free{,_range}(). */
#define BITMAP_NO_RR			0
#define BITMAP_RR			1

#define MR_TYPE_MR			0x00
#define MR_TYPE_DMA			0x03

#define PKEY_ID				0xffff
#define GUID_LEN			8
#define NODE_DESC_SIZE			64
#define DB_REG_OFFSET			0x1000

/* Transport service types carried in the QP context. */
#define SERV_TYPE_RC			0
#define SERV_TYPE_RD			1
#define SERV_TYPE_UC			2
#define SERV_TYPE_UD			3

#define PAGES_SHIFT_8			8
#define PAGES_SHIFT_16			16
#define PAGES_SHIFT_24			24
#define PAGES_SHIFT_32			32
107
/* Userspace capability flag: the RQ keeps a doorbell record in memory. */
enum {
	HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
};

/* Userspace capability flag: the CQ keeps a doorbell record in memory. */
enum {
	HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
};

/* Hardware QP states; they mirror the IB verbs QP state machine. */
enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,	/* count of states, not a real state */
};
125
/*
 * Asynchronous event codes delivered via the AEQ.  The numeric values
 * are fixed by the device interface; gaps (0x0e, 0x10-0x11) are codes
 * not used by this driver.
 */
enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
	/* 0x10 and 0x11 are reserved here */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW              = 0x14,
	HNS_ROCE_EVENT_TYPE_FLR                       = 0x15,
};
147
148
/* Detail codes reported with local work-queue error events (LWQCE). */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR		= 1,
	HNS_ROCE_LWQCE_MTU_ERROR		= 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR	= 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR		= 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR	= 5,
	HNS_ROCE_LWQCE_SL_ERROR			= 6,
	HNS_ROCE_LWQCE_PORT_ERROR		= 7,
};

/* Detail codes reported with local access-violation WQE errors (LAVWQE). */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION		= 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR		= 2,
	HNS_ROCE_LAVWQE_VA_ERROR		= 3,
	HNS_ROCE_LAVWQE_PD_ERROR		= 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR		= 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR		= 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR	= 7,
};

/* Sub-types of the doorbell-overflow asynchronous event (SDB/ODB). */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF		= 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF		= 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF		= 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF		= 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP		= 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP		= 6,
};
179
/* Receive-side opcodes found in CQEs. */
enum {
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE	= 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
};

/* Device capability bits advertised through hns_roce_caps.flags. */
enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(2),
	HNS_ROCE_CAP_FLAG_RECORD_DB		= BIT(3)
};

/* Which translation table an MTT belongs to: WQE buffers or CQE buffers. */
enum hns_roce_mtt_type {
	MTT_TYPE_WQE,
	MTT_TYPE_CQE,
};

/* Doorbell records are 4 bytes each, packed into one page. */
enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};
201
#define HNS_ROCE_CMD_SUCCESS		1

#define HNS_ROCE_PORT_DOWN		0
#define HNS_ROCE_PORT_UP		1

/* Number of MTT entries grouped into one buddy-allocator segment. */
#define HNS_ROCE_MTT_ENTRY_PER_SEG	8

#define PAGE_ADDR_SHIFT			12
210
/* One user access region: a doorbell page mapped to a context. */
struct hns_roce_uar {
	u64		pfn;		/* page frame number of the region */
	unsigned long	index;		/* index within the UAR table */
};

/* Per-process (verbs ucontext) driver state. */
struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
	struct list_head	page_list;	/* user doorbell pages */
	struct mutex		page_mutex;	/* protects page_list */
};

struct hns_roce_pd {
	struct ib_pd	ibpd;
	unsigned long	pdn;	/* protection domain number */
};

/* Simple id allocator over a flat bit table (QPNs, CQNs, MPT indices, ...). */
struct hns_roce_bitmap {
	unsigned long	last;		/* last allocated position (scan hint) */
	unsigned long	top;		/* rotating high bits mixed into ids */
	unsigned long	max;		/* number of allocatable ids */
	unsigned long	reserved_top;	/* ids reserved at the top of the range */
	unsigned long	mask;		/* id mask (max rounded to a power of 2 - 1) */
	spinlock_t	lock;		/* protects the fields above and table */
	unsigned long	*table;		/* the bit table itself */
};
238
239
240
241
242
243
244
245
/* Buddy allocator used to hand out contiguous ranges of MTT segments. */
struct hns_roce_buddy {
	unsigned long	**bits;		/* one bitmap per order */
	u32		*num_free;	/* free-block count per order */
	int		max_order;
	spinlock_t	lock;		/* protects bits and num_free */
};

/* One hardware entry-memory (HEM) table (QPC, CQC, MTPT, MTT, ...). */
struct hns_roce_hem_table {
	u32		type;		/* which HEM resource this table backs */
	unsigned long	num_hem;	/* number of HEM chunks */
	unsigned long	num_obj;	/* number of objects in the table */
	unsigned long	obj_size;	/* size of a single object */
	unsigned long	table_chunk_size;
	int		lowmem;		/* allocate chunks from lowmem */
	struct mutex	mutex;		/* serializes chunk alloc/free */
	struct hns_roce_hem **hem;
	/* Multi-hop base-address tables and their DMA addresses. */
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

/* A run of MTT entries describing one buffer's page list. */
struct hns_roce_mtt {
	unsigned long		first_seg;	/* first segment in the buddy space */
	int			order;		/* log2 number of segments */
	int			page_shift;	/* page size covered per entry */
	enum hns_roce_mtt_type	mtt_type;	/* WQE or CQE table */
};
281
282
#define MR_SIZE_4K 0

/* Driver state for a memory region, including its PBL page tables. */
struct hns_roce_mr {
	struct ib_mr		ibmr;
	struct ib_umem		*umem;		/* pinned user memory, NULL for DMA MRs */
	u64			iova;		/* MR start address (IO virtual) */
	u64			size;		/* length of the MR */
	u32			key;		/* lkey/rkey value */
	u32			pd;		/* owning protection domain number */
	u32			access;		/* IB access flags */
	int			enabled;	/* nonzero once written to hardware */
	int			type;		/* MR_TYPE_MR or MR_TYPE_DMA */
	/* Flat (single-hop) page buffer list. */
	u64			*pbl_buf;
	dma_addr_t		pbl_dma_addr;
	u32			pbl_size;
	u64			pbl_ba;
	u32			l0_chunk_last_num;
	u32			l1_chunk_last_num;
	/* Multi-hop PBL base-address tables (L0 -> L1 -> L2). */
	u64			**pbl_bt_l2;
	u64			**pbl_bt_l1;
	u64			*pbl_bt_l0;
	dma_addr_t		*pbl_l2_dma_addr;
	dma_addr_t		*pbl_l1_dma_addr;
	dma_addr_t		pbl_l0_dma_addr;
	u32			pbl_ba_pg_sz;	/* BA table page size (log units) */
	u32			pbl_buf_pg_sz;	/* buffer page size (log units) */
	u32			pbl_hop_num;	/* number of address hops */
};

/* All MR-related allocators and HEM tables of the device. */
struct hns_roce_mr_table {
	struct hns_roce_bitmap		mtpt_bitmap;
	struct hns_roce_buddy		mtt_buddy;
	struct hns_roce_hem_table	mtt_table;
	struct hns_roce_hem_table	mtpt_table;
	struct hns_roce_buddy		mtt_cqe_buddy;
	struct hns_roce_hem_table	mtt_cqe_table;
};
320
/* One work queue (SQ or RQ) of a QP. */
struct hns_roce_wq {
	u64		*wrid;		/* wr_id of each posted WQE, indexed by slot */
	spinlock_t	lock;		/* protects head/tail updates */
	int		wqe_cnt;	/* queue depth, a power of two */
	u32		max_post;
	int		max_gs;		/* max scatter/gather entries per WQE */
	int		offset;		/* byte offset of this WQ in the QP buffer */
	int		wqe_shift;	/* log2 of the WQE stride */
	u32		head;		/* producer index */
	u32		tail;		/* consumer index */
	void __iomem	*db_reg_l;	/* doorbell register */
};

/* Extended SGE area of a send queue. */
struct hns_roce_sge {
	int	sge_cnt;	/* number of extended SGEs */
	int	offset;		/* byte offset in the QP buffer */
	int	sge_shift;	/* log2 of one SGE's size */
};

/* One DMA-coherent chunk: kernel virtual address plus bus address. */
struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

/*
 * A queue buffer: either one direct contiguous allocation (nbufs == 1)
 * or a list of page-sized chunks.
 */
struct hns_roce_buf {
	struct hns_roce_buf_list	direct;
	struct hns_roce_buf_list	*page_list;
	int				nbufs;
	u32				npages;
	int				page_shift;
};
352
/* A kernel page carved into doorbell records, tracked by two order bitmaps. */
struct hns_roce_db_pgdir {
	struct list_head	list;		/* link in hr_dev->pgdir_list */
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2);
	unsigned long		*bits[2];	/* [order] -> bitmap above */
	u32			*page;		/* CPU address of the page */
	dma_addr_t		db_dma;		/* bus address of the page */
};

/* A pinned userspace page holding user doorbell records. */
struct hns_roce_user_db_page {
	struct list_head	list;		/* link in ucontext->page_list */
	struct ib_umem		*umem;
	unsigned long		user_virt;	/* page-aligned user address */
	refcount_t		refcount;	/* mappings sharing this page */
};

/* One doorbell record, backed either by a kernel pgdir or a user page. */
struct hns_roce_db {
	u32		*db_record;	/* CPU address of the record */
	union {
		struct hns_roce_db_pgdir	*pgdir;
		struct hns_roce_user_db_page	*user_page;
	} u;
	dma_addr_t	dma;		/* bus address of the record */
	int		index;		/* slot within the page */
	int		order;		/* allocation order within the pgdir */
};
379
/* CQE buffer plus the MTT that maps it for the hardware. */
struct hns_roce_cq_buf {
	struct hns_roce_buf	hr_buf;
	struct hns_roce_mtt	hr_mtt;
};

/* Driver state for one completion queue. */
struct hns_roce_cq {
	struct ib_cq		ib_cq;
	struct hns_roce_cq_buf	hr_buf;
	struct hns_roce_db	db;		/* CQ doorbell record */
	u8			db_en;		/* record doorbell enabled */
	spinlock_t		lock;		/* protects polling/arming */
	struct ib_umem		*umem;		/* user CQE buffer, NULL for kernel CQs */
	/* Upcalls invoked from EQ handling on completion / async events. */
	void (*comp)(struct hns_roce_cq *cq);
	void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);

	struct hns_roce_uar	*uar;
	u32			cq_depth;
	u32			cons_index;	/* consumer index */
	u32			*set_ci_db;	/* record-doorbell CI location */
	void __iomem		*cq_db_l;	/* doorbell register */
	u16			*tptr_addr;
	int			arm_sn;		/* arm sequence number */
	unsigned long		cqn;		/* CQ number */
	u32			vector;		/* completion vector (CEQ) */
	atomic_t		refcount;	/* event-handler references */
	struct completion	free;		/* signalled when refcount drops */
};

/* Minimal SRQ placeholder (SRQs are not fully supported here). */
struct hns_roce_srq {
	struct ib_srq	ibsrq;
	int		srqn;
};
412
struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;	/* allocator for UAR indices */
};

/* QP number allocator plus the HEM tables backing QP contexts. */
struct hns_roce_qp_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;	/* protects the QPN radix tree */
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
};

/* CQ number allocator, CQN -> hns_roce_cq lookup tree, and CQC table. */
struct hns_roce_cq_table {
	struct hns_roce_bitmap		bitmap;
	spinlock_t			lock;	/* protects tree */
	struct radix_tree_root		tree;
	struct hns_roce_hem_table	table;
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list	*e_raq_buf;
};
435
/* Address vector in the layout consumed by the hardware. */
struct hns_roce_av {
	__le32	port_pd;		/* port number and PD packed together */
	u8	gid_index;
	u8	stat_rate;
	u8	hop_limit;
	/* Packed with HNS_ROCE_SL_SHIFT/TCLASS_SHIFT/FLOW_LABLE_MASK. */
	__le32	sl_tclass_flowlabel;
	u8	dgid[HNS_ROCE_GID_SIZE];
	u8	mac[6];
	__le16	vlan;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

/* Per-command bookkeeping for event-driven (interrupt) command mode. */
struct hns_roce_cmd_context {
	struct completion	done;		/* fired by hns_roce_cmd_event() */
	int			result;		/* command status from the EQ */
	int			next;		/* free-list link */
	u64			out_param;
	u16			token;		/* matches command and completion */
};
459
/* State of the mailbox command interface (polling or event driven). */
struct hns_roce_cmdq {
	struct dma_pool		*pool;		/* mailbox buffer pool */
	struct mutex		hcr_mutex;	/* serializes command posting */
	struct semaphore	poll_sem;	/* one command at a time in poll mode */
	/*
	 * In event mode, bounds the number of commands in flight to
	 * max_cmds.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;	/* protects the context free list */
	int			free_head;	/* head of the context free list */
	struct hns_roce_cmd_context *context;
	/*
	 * Mask of bits mixed into the command token so that a completion
	 * for a recycled context slot can be told apart.
	 */
	u16			token_mask;
	/*
	 * Nonzero when event (interrupt) completion is active; switched by
	 * hns_roce_cmd_use_events()/hns_roce_cmd_use_polling().
	 */
	u8			use_events;
	u8			toggle;
};

/* A DMA-able mailbox buffer passed to firmware commands. */
struct hns_roce_cmd_mailbox {
	void		*buf;	/* CPU address */
	dma_addr_t	dma;	/* bus address */
};
492
struct hns_roce_dev;

/* One scatter entry for RQ-inline data. */
struct hns_roce_rinl_sge {
	void		*addr;
	u32		len;
};

/* Per-RQ-WQE list of inline scatter entries. */
struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
	u32			 sge_cnt;	/* entries in sg_list */
};

/* RQ-inline bookkeeping for a whole queue (one entry per RQ WQE). */
struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
	u32			 wqe_cnt;	/* entries in wqe_list */
};
509
/* Driver state for one queue pair. */
struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_buf	hr_buf;		/* combined SQ/RQ buffer */
	struct hns_roce_wq	rq;
	struct hns_roce_db	rdb;		/* RQ record doorbell */
	u8			rdb_en;		/* RQ record doorbell enabled */
	u32			doorbell_qpn;
	__le32			sq_signal_bits;	/* all-signaled flag, HW layout */
	u32			sq_next_wqe;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct hns_roce_wq	sq;

	struct ib_umem		*umem;		/* user QP buffer, NULL for kernel QPs */
	struct hns_roce_mtt	mtt;
	u32			buff_size;
	struct mutex		mutex;		/* serializes modify_qp */
	u8			port;		/* logical port, 0-based */
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;		/* current hns_roce_qp_state */
	u32			access_flags;
	u32			atomic_rd_en;
	u32			pkey_index;
	u32			qkey;
	/* Async event upcall, invoked from AEQ handling. */
	void (*event)(struct hns_roce_qp *qp,
		      enum hns_roce_event event_type);
	unsigned long		qpn;

	atomic_t		refcount;	/* event-handler references */
	struct completion	free;		/* signalled when refcount drops */

	struct hns_roce_sge	sge;		/* extended SGE region of the SQ */
	u32			next_sge;

	struct hns_roce_rinl_buf rq_inl_buf;	/* RQ inline-receive state */
};

/* Special QP (GSI) wrapper around a regular QP. */
struct hns_roce_sqp {
	struct hns_roce_qp	hr_qp;
};
552
/* Ethernet-side (IBoE) state: one netdev per RoCE port. */
struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device	*netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;	/* netdev event notifier */
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};

/* EQ context arm states written to hardware. */
enum {
	HNS_ROCE_EQ_STAT_INVALID  = 0,
	HNS_ROCE_EQ_STAT_VALID    = 2,
};

/* Completion EQ entry: a single word identifying the CQ. */
struct hns_roce_ceqe {
	u32	comp;
};

/* Asynchronous EQ entry; the payload depends on the event type in asyn. */
struct hns_roce_aeqe {
	u32 asyn;	/* event type/subtype word */
	union {
		struct {
			u32 qp;		/* QPN the event refers to */
			u32 rsv0;
			u32 rsv1;
		} qp_event;

		struct {
			u32 cq;		/* CQN the event refers to */
			u32 rsv0;
			u32 rsv1;
		} cq_event;

		struct {
			u32 ceqe;
			u32 rsv0;
			u32 rsv1;
		} ce_event;

		/* Mailbox-command completion; packed to match HW layout. */
		struct {
			__le64	out_param;
			__le16	token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	 } event;
};
598
/* Driver state for one event queue (CEQ or AEQ). */
struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*doorbell;

	int				type_flag;	/* HNS_ROCE_CEQ or HNS_ROCE_AEQ */
	int				eqn;
	u32				entries;	/* queue depth */
	int				log_entries;
	int				eqe_size;	/* entry size in bytes */
	int				irq;
	int				log_page_size;
	int				cons_index;	/* consumer index */
	struct hns_roce_buf_list	*buf_list;	/* flat (non-hop) EQE buffer */
	int				over_ignore;
	int				coalesce;
	int				arm_st;		/* arm state, HNS_ROCE_EQ_STAT_* */
	/* Multi-hop EQE buffer: base-address tables and data pages. */
	u64				eqe_ba;
	int				eqe_ba_pg_sz;
	int				eqe_buf_pg_sz;
	int				hop_num;
	u64				*bt_l0;
	u64				**bt_l1;
	u64				**buf;
	dma_addr_t			l0_dma;
	dma_addr_t			*l1_dma;
	dma_addr_t			*buf_dma;
	u32				l0_last_num;
	u32				l1_last_num;
	int				eq_max_cnt;	/* interrupt coalescing count */
	int				eq_period;	/* interrupt coalescing period */
	int				shift;
	dma_addr_t			cur_eqe_ba;
	dma_addr_t			nxt_eqe_ba;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;		/* array of all EQs */
	void __iomem		**eqc_base;	/* per-EQ context register bases */
};
638
/*
 * Device capabilities and sizing, filled in by the hw_profile() hook.
 * The *_pg_sz fields are page sizes expressed as a shift relative to the
 * base page, and *_hop_num fields give the address-translation depth.
 */
struct hns_roce_caps {
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;
	u32		max_sq_inline;
	u32		max_rq_sg;
	int		num_qps;
	u32		max_wqes;
	u32		max_sq_desc_sz;
	u32		max_rq_desc_sz;
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	int		num_cqs;
	int		max_cqes;
	int		min_cqes;
	u32		min_wqes;
	int		reserved_cqs;
	int		num_aeq_vectors;
	int		num_comp_vectors;
	int		num_other_vectors;
	int		num_mtpts;
	u32		num_mtt_segs;
	u32		num_cqe_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		mtt_entry_sz;
	u32		cq_entry_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	/* Per-resource context entry sizes. */
	int		mtpt_entry_sz;
	int		qpc_entry_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		mpt_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		cqe_ba_pg_sz;
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		chunk_sz;	/* HEM chunk size */
	u64		flags;		/* HNS_ROCE_CAP_FLAG_* bits */
};
714
/*
 * Hardware abstraction: one implementation of these hooks per hardware
 * generation.  Hooks marked optional may be NULL (callers must check).
 */
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	/* Fill hr_dev->caps with this hardware's limits. */
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	/* Post a mailbox command; event selects interrupt vs. poll completion. */
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
			 u16 token, int event);
	/* Poll until the last mailbox command finishes or timeout expires. */
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	/* Serialize an MR into the MTPT entry inside a mailbox buffer. */
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags, u32 pdn,
				int mr_access_flags, u64 iova, u64 size,
				void *mb_buf);
	/* Serialize a CQ context into a mailbox buffer. */
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle, int nent, u32 vector);
	/* Map/unmap one HEM object into the hardware at the given hop step. */
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp);
	int (*post_send)(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
			 struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
	int (*destroy_cq)(struct ib_cq *ibcq);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};
763
/* Top-level per-device state. */
struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct platform_device	*pdev;		/* set for platform-attached HW */
	struct pci_dev		*pci_dev;	/* set for PCI-attached HW */
	struct device		*dev;
	struct hns_roce_uar	priv_uar;	/* kernel-owned UAR */
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	spinlock_t		bt_cmd_lock;
	struct hns_roce_ib_iboe iboe;

	struct list_head	pgdir_list;	/* kernel doorbell pages */
	struct mutex		pgdir_mutex;	/* protects pgdir_list */
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;	/* mapped device registers */
	struct hns_roce_caps	caps;
	struct radix_tree_root	qp_table_tree;	/* QPN -> hns_roce_qp */

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
	u64			sys_image_guid;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_rev;
	void __iomem		*priv_addr;

	/* Resource allocators and per-resource tables. */
	struct hns_roce_cmdq	cmd;
	struct hns_roce_bitmap	pd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;

	int			cmd_mod;	/* nonzero: event-mode commands */
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	dma_addr_t		tptr_dma_addr;	/* hardware v1 tptr buffer */
	u32			tptr_size;
	const struct hns_roce_hw *hw;		/* per-generation hooks */
	void			*priv;		/* hw-specific private data */
};
806
/* container_of converters from IB core objects to driver objects. */

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
{
	return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
}
852
/*
 * Write a 64-bit doorbell (two big-endian words) to MMIO in one access.
 * NOTE(review): type-puns the __be32 pair as a u64 and relies on a
 * 64-bit-capable __raw_writeq — confirm behaviour on 32-bit platforms.
 */
static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
{
	__raw_writeq(*(u64 *) val, dest);
}

/*
 * Look up a QP by QPN (masked to the allocator range) in the device's
 * radix tree.  NOTE(review): callers presumably hold qp_table.lock or
 * otherwise pin the QP — confirm against call sites.
 */
static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return radix_tree_lookup(&hr_dev->qp_table_tree,
				 qpn & (hr_dev->caps.num_qps - 1));
}
864
865static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
866{
867 u32 page_size = 1 << buf->page_shift;
868
869 if (buf->nbufs == 1)
870 return (char *)(buf->direct.buf) + offset;
871 else
872 return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
873 (offset & (page_size - 1));
874}
875
/* UAR table management. */
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

/* Mailbox command interface (see struct hns_roce_cmdq). */
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* MTT allocation and population. */
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt);
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtt *mtt);
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);

/* Per-resource table init/cleanup, called from hns_roce_init()/exit(). */
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
906
907int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
908void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
909 int rr);
910int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
911 u32 reserved_bot, u32 resetrved_top);
912void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
913void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
914int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
915 int align, unsigned long *obj);
916void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
917 unsigned long obj, int cnt,
918 int rr);
919
/* Address handle verbs. */
struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah);

/* Protection domain verbs. */
struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd);

/* Memory region verbs and helpers. */
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

/* Queue buffer allocation (direct or paged, see struct hns_roce_buf). */
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift);

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem);

/* Queue pair verbs and WQE accessors. */
struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt);
__be32 send_ieth(struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

/* Completion queue verbs. */
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);

/* Doorbell record management (kernel and user backed). */
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

/* Event dispatch from EQ handling to CQ/QP objects. */
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
996
997#endif
998