#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>

enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
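	/* commands are given up to one minute to complete */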
	MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
	CMD_OWNER_SW = 0x0,
	CMD_OWNER_HW = 0x1,
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI = 0,
	MLX5_SQP_GSI = 1,
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SNIFFER = 3,
	MLX5_SQP_SYNC_UMR = 4,
};

enum {
	MLX5_MAX_PORTS = 2,
};

enum {
	MLX5_EQ_VEC_PAGES = 0,
	MLX5_EQ_VEC_CMD = 1,
	MLX5_EQ_VEC_ASYNC = 2,
	MLX5_EQ_VEC_PFAULT = 3,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME = 32
};

enum {
	MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
	MLX5_ATOMIC_MODE_CX = 2 << 16,
	MLX5_ATOMIC_MODE_8B = 3 << 16,
	MLX5_ATOMIC_MODE_16B = 4 << 16,
	MLX5_ATOMIC_MODE_32B = 5 << 16,
	MLX5_ATOMIC_MODE_64B = 6 << 16,
	MLX5_ATOMIC_MODE_128B = 7 << 16,
	MLX5_ATOMIC_MODE_256B = 8 << 16,
};

enum {
	MLX5_REG_QETCR = 0x4005,
	MLX5_REG_QTCT = 0x400a,
	MLX5_REG_DCBX_PARAM = 0x4020,
	MLX5_REG_DCBX_APP = 0x4021,
	MLX5_REG_FPGA_CAP = 0x4022,
	MLX5_REG_FPGA_CTRL = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
	MLX5_REG_PAOS = 0x5006,
	MLX5_REG_PFCC = 0x5007,
	MLX5_REG_PPCNT = 0x5008,
	MLX5_REG_PMAOS = 0x5012,
	MLX5_REG_PUDE = 0x5009,
	MLX5_REG_PMPE = 0x5010,
	MLX5_REG_PELC = 0x500e,
	MLX5_REG_PVLC = 0x500f,
	MLX5_REG_PCMR = 0x5041,
	MLX5_REG_PMLP = 0x5002,
	MLX5_REG_PCAM = 0x507f,
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA = 0x9014,
	MLX5_REG_MLCR = 0x902b,
	MLX5_REG_MPCNT = 0x9051,
	MLX5_REG_MTPPS = 0x9053,
	MLX5_REG_MTPPSE = 0x9054,
	MLX5_REG_MCQI = 0x9061,
	MLX5_REG_MCC = 0x9062,
	MLX5_REG_MCDA = 0x9063,
	MLX5_REG_MCAM = 0x907f,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN = 0,
	MLX5_POLICY_UP = 1,
	MLX5_POLICY_FOLLOW = 2,
	MLX5_POLICY_INVALID = 0xffffffff
};

struct mlx5_field_desc {
	struct dentry *dent;
	int i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
	struct mlx5_field_desc fields[0];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
	MLX5_DEV_EVENT_PPS,
	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};

enum mlx5_port_status {
	MLX5_PORT_UP = 1,
	MLX5_PORT_DOWN = 2,
};

enum mlx5_eq_type {
	MLX5_EQ_TYPE_COMP,
	MLX5_EQ_TYPE_ASYNC,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	MLX5_EQ_TYPE_PF,
#endif
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;
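
	/* protects bfreg allocation data structs */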
	struct mutex lock;
	u32 ver;
	bool lib_uar_4k;
	u32 num_sys_pages;
};

struct mlx5_cmd_first {
	__be32 data[4];
};

struct mlx5_cmd_msg {
	struct list_head list;
	struct cmd_msg_cache *parent;
	u32 len;
	struct mlx5_cmd_first first;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	struct dentry *dbg_in;
	struct dentry *dbg_out;
	struct dentry *dbg_outlen;
	struct dentry *dbg_status;
	struct dentry *dbg_run;
	void *in_msg;
	void *out_msg;
	u8 status;
	u16 inlen;
	u16 outlen;
};

struct cmd_msg_cache {
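	/* protects the list of cached command messages */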
	spinlock_t lock;
	struct list_head head;
	unsigned int max_inbox_size;
	unsigned int num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64 sum;
	u64 n;
	struct dentry *root;
	struct dentry *avg;
	struct dentry *count;
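	/* protects command statistics accounting */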
	spinlock_t lock;
};

struct mlx5_cmd {
	void *cmd_alloc_buf;
	dma_addr_t alloc_dma;
	int alloc_size;
	void *cmd_buf;
	dma_addr_t dma;
	u16 cmdif_rev;
	u8 log_sz;
	u8 log_stride;
	int max_reg_cmds;
	int events;
	u32 __iomem *vector;
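
	/* protects command queue allocations */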
	spinlock_t alloc_lock;
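
	/* protects token allocation */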
	spinlock_t token_lock;
	u8 token;
	unsigned long bitmask;
	char wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_port_caps {
	int gid_table_len;
	int pkey_table_len;
	u8 ext_port_cap;
	bool has_smi;
};

struct mlx5_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void *buf;
	dma_addr_t map;
};

struct mlx5_buf {
	struct mlx5_buf_list direct;
	int npages;
	int size;
	u8 page_shift;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list *frags;
	int npages;
	int size;
	u8 page_shift;
};

struct mlx5_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
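	/* protects the completion tasklet lists */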
	spinlock_t lock;
};

struct mlx5_eq_pagefault {
	struct work_struct work;
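	/* page-fault handling lock */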
	spinlock_t lock;
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_eq {
	struct mlx5_core_dev *dev;
	__be32 __iomem *doorbell;
	u32 cons_index;
	struct mlx5_buf buf;
	int size;
	unsigned int irqn;
	u8 eqn;
	int nent;
	u64 mask;
	struct list_head list;
	int index;
	struct mlx5_rsc_debug *dbg;
	enum mlx5_eq_type type;
	union {
		struct mlx5_eq_tasklet tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		struct mlx5_eq_pagefault pf_ctx;
#endif
	};
};

struct mlx5_core_psv {
	u32 psv_idx;
	struct psv_layout {
		u32 pd;
		u16 syndrome;
		u16 reserved;
		u16 bg;
		u16 app_tag;
		u32 ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv psv_memory;
	struct mlx5_core_psv psv_wire;
	struct ib_sig_err err_item;
	bool sig_status_checked;
	bool sig_err_exists;
	u32 sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
};

struct mlx5_core_mkey {
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
	u32 type;
};

#define MLX5_24BIT_MASK ((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ = 3,
	MLX5_RES_XSRQ = 4,
	MLX5_RES_XRQ = 5,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
	atomic_t refcount;
	struct completion free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common common;
	u32 srqn;
	int max;
	int max_gs;
	int max_avail_gather;
	int wqe_shift;
	void (*event)(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t refcount;
	struct completion free;
};

struct mlx5_eq_table {
	void __iomem *update_ci;
	void __iomem *update_arm_ci;
	struct list_head comp_eqs_list;
	struct mlx5_eq pages_eq;
	struct mlx5_eq async_eq;
	struct mlx5_eq cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq pfault_eq;
#endif
	int num_comp_vectors;
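
	/* protects the EQs list */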
	spinlock_t lock;
};

struct mlx5_uars_page {
	void __iomem *map;
	bool wc;
	u32 index;
	struct list_head list;
	unsigned int bfregs;
	unsigned long *reg_bitmap;
	unsigned long *fp_bitmap;
	unsigned int reg_avail;
	unsigned int fp_avail;
	struct kref ref_count;
	struct mlx5_core_dev *mdev;
};

struct mlx5_bfreg_head {
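	/* protects the blue-flame register list below */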
	struct mutex lock;
	struct list_head list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head reg_head;
	struct mlx5_bfreg_head wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem *map;
	struct mlx5_uars_page *up;
	bool wc;
	u32 index;
	unsigned int offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;
	u32 prev;
	int miss_counter;
	bool sick;
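	/* wq spinlock to synchronize draining */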
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct work_struct work;
	struct delayed_work recover_work;
};

struct mlx5_cq_table {
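	/* protects the radix tree */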
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_qp_table {
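	/* protects the radix tree */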
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_srq_table {
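	/* protects the radix tree */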
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_mkey_table {
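	/* protects the radix tree */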
	rwlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_vf_context {
	int enabled;
	u64 port_guid;
	u64 node_guid;
	enum port_state_policy policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int num_vfs;
	int enabled_vfs;
};

struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_fc_stats {
	struct rb_root counters;
	struct list_head addlist;
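	/* protects addlist add/splice operations */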
	spinlock_t addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval;
};

struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;

struct mlx5_rl_entry {
	u32 rate;
	u16 index;
	u16 refcount;
};

struct mlx5_rl_table {
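	/* protects the rate limit table */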
	struct mutex rl_lock;
	u16 max_size;
	u32 max_rate;
	u32 min_rate;
	struct mlx5_rl_entry *rl_entry;
};

enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR = 0x3,
	MLX5_MODULE_STATUS_NUM = 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};

struct mlx5_priv {
	char name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table eq_table;
	struct mlx5_irq_info *irq_info;
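
	/* firmware pages bookkeeping */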
	struct workqueue_struct *pg_wq;
	struct rb_root page_root;
	int fw_pages;
	atomic_t reg_pages;
	struct list_head free_list;
	int vfs_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table srq_table;

	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;

	struct mlx5_cq_table cq_table;

	struct mlx5_mkey_table mkey_table;
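
	/* protects buffer allocation according to numa node */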
	struct mutex alloc_mutex;
	int numa_node;

	struct mutex pgdir_mutex;
	struct list_head pgdir_list;

	struct dentry *dbg_root;
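
	/* protects the mkey key (variant) part */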
	spinlock_t mkey_lock;
	u8 mkey_key;

	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;

	struct list_head waiting_events_list;
	bool is_accum_events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs *mpfs;
	struct mlx5_eswitch *eswitch;
	struct mlx5_core_sriov sriov;
	struct mlx5_lag *lag;
	unsigned long pci_dev_data;
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;

	struct mlx5_port_module_event_stats pme_stats;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	void (*pfault)(struct mlx5_core_dev *dev,
		       void *context,
		       struct mlx5_pagefault *pfault);
	void *pfault_ctx;
	struct srcu_struct pfault_srcu;
#endif
	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE = 1 << 1,
	MLX5_PFAULT_RDMA = 1 << 2,
};

struct mlx5_pagefault {
	u32 bytes_committed;
	u32 token;
	u8 event_subtype;
	u8 type;
	union {
		/* Initiator or send-message-responder page fault details. */
		struct {
			/* Received packet size; valid only for responders. */
			u32 packet_size;
			/* Number of the WQ holding the faulting WQE. */
			u32 wq_num;
			/* Index of the faulting WQE (send or receive queue,
			 * according to event_subtype).
			 */
			u16 wqe_index;
		} wqe;
		/* RDMA-responder page fault details. */
		struct {
			u32 r_key;
			/* Received packet size; the minimal resolution
			 * required for forward progress.
			 */
			u32 packet_size;
			u32 rdma_op_len;
			u64 rdma_va;
		} rdma;
	};

	struct mlx5_eq *eq;
	struct work_struct work;
};

struct mlx5_td {
	struct list_head tirs_list;
	u32 tdn;
};

struct mlx5e_resources {
	u32 pdn;
	struct mlx5_td td;
	struct mlx5_core_mkey mkey;
	struct mlx5_sq_bfreg bfreg;
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM 8
struct mlx5_pps {
	u8 pin_caps[MAX_PIN_NUM];
	struct work_struct out_work;
	u64 start[MAX_PIN_NUM];
	u8 enabled;
};

struct mlx5_clock {
	rwlock_t lock;
	struct cyclecounter cycles;
	struct timecounter tc;
	struct hwtstamp_config hwtstamp_config;
	u32 nominal_c_mult;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	struct mlx5_pps pps_info;
};

struct mlx5_core_dev {
	struct pci_dev *pdev;
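	/* serializes PCI state (enable/disable) transitions */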
	struct mutex pci_status_mutex;
	enum mlx5_pci_status pci_status;
	u8 rev_id;
	char board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd cmd;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
	} caps;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state state;
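	/* serializes interface state changes */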
	struct mutex intf_state_mutex;
	unsigned long intf_state;
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      unsigned long param);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;
	atomic_t num_qps;
	u32 issi;
	struct mlx5e_resources mlx5e_res;
	struct {
		struct mlx5_rsvd_gids reserved_gids;
		atomic_t roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif
	struct mlx5_clock clock;
};

struct mlx5_db {
	__be32 *db;
	union {
		struct mlx5_db_pgdir *pgdir;
		struct mlx5_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long state;
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;
	void *uout;
	int uout_size;
	mlx5_cmd_cbk_t callback;
	struct delayed_work cb_timeout_work;
	void *context;
	int idx;
	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
	int ret;
	int page_queue;
	u8 status;
	u8 token;
	u64 ts1;
	u64 ts2;
	u16 op;
};

struct mlx5_pas {
	u64 pa;
	u8 log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32 field_select;
	bool sm_virt_aware;
	bool has_smi;
	bool has_raw;
	enum port_state_policy policy;
	enum phy_port_state phys_state;
	enum ib_port_state vport_state;
	u8 port_physical_state;
	u64 sys_image_guid;
	u64 port_guid;
	u64 node_guid;
	u32 cap_mask1;
	u32 cap_mask1_perm;
	u32 cap_mask2;
	u32 cap_mask2_perm;
	u16 lid;
	u8 init_type_reply;
	u8 lmc;
	u8 subnet_timeout;
	u16 sm_lid;
	u8 sm_sl;
	u16 qkey_violation_counter;
	u16 pkey_violation_counter;
	bool grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id);

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}

enum {
	MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};

struct mlx5_interface {
	void *(*add)(struct mlx5_core_dev *dev);
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	int (*attach)(struct mlx5_core_dev *dev, void *context);
	void (*detach)(struct mlx5_core_dev *dev, void *context);
	void (*event)(struct mlx5_core_dev *dev, void *context,
		      enum mlx5_dev_event event, unsigned long param);
	void (*pfault)(struct mlx5_core_dev *dev,
		       void *context,
		       struct mlx5_pagefault *pfault);
	void *(*get_dev)(void *context);
	int protocol;
	struct list_head list;
};

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);

#ifndef CONFIG_MLX5_CORE_IPOIB
static inline
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *))
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
#else
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
void mlx5_rdma_netdev_free(struct net_device *netdev);
#endif

struct mlx5_profile {
	u64 mask;
	u8 log_max_qp;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
{
	return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
}

#endif /* MLX5_DRIVER_H */