/*
 * NOTE(review): the original top-of-file license/copyright comment block
 * (lines 1-32) was lost in extraction, leaving only bare line numbers.
 * Restore the dual-license (GPLv2 / OpenIB.org BSD) header from the
 * upstream kernel tree before building or redistributing this file.
 */
33#ifndef __MLX5_CORE_H__
34#define __MLX5_CORE_H__
35
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/sched.h>
39#include <linux/if_link.h>
40#include <linux/firmware.h>
41#include <linux/ptp_clock_kernel.h>
42#include <linux/mlx5/cq.h>
43#include <linux/mlx5/fs.h>
44#include <linux/mlx5/driver.h>
45
46#define DRIVER_NAME "mlx5_core"
47#define DRIVER_VERSION "5.0-0"
48
49extern uint mlx5_core_debug_mask;
50
51#define mlx5_core_dbg(__dev, format, ...) \
52 dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
53 __func__, __LINE__, current->pid, \
54 ##__VA_ARGS__)
55
56#define mlx5_core_dbg_once(__dev, format, ...) \
57 dev_dbg_once((__dev)->device, \
58 "%s:%d:(pid %d): " format, \
59 __func__, __LINE__, current->pid, \
60 ##__VA_ARGS__)
61
62#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
63do { \
64 if ((mask) & mlx5_core_debug_mask) \
65 mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
66} while (0)
67
68#define mlx5_core_err(__dev, format, ...) \
69 dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
70 __func__, __LINE__, current->pid, \
71 ##__VA_ARGS__)
72
73#define mlx5_core_err_rl(__dev, format, ...) \
74 dev_err_ratelimited((__dev)->device, \
75 "%s:%d:(pid %d): " format, \
76 __func__, __LINE__, current->pid, \
77 ##__VA_ARGS__)
78
79#define mlx5_core_warn(__dev, format, ...) \
80 dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
81 __func__, __LINE__, current->pid, \
82 ##__VA_ARGS__)
83
84#define mlx5_core_warn_once(__dev, format, ...) \
85 dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
86 __func__, __LINE__, current->pid, \
87 ##__VA_ARGS__)
88
89#define mlx5_core_warn_rl(__dev, format, ...) \
90 dev_warn_ratelimited((__dev)->device, \
91 "%s:%d:(pid %d): " format, \
92 __func__, __LINE__, current->pid, \
93 ##__VA_ARGS__)
94
95#define mlx5_core_info(__dev, format, ...) \
96 dev_info((__dev)->device, format, ##__VA_ARGS__)
97
98#define mlx5_core_info_rl(__dev, format, ...) \
99 dev_info_ratelimited((__dev)->device, \
100 "%s:%d:(pid %d): " format, \
101 __func__, __LINE__, current->pid, \
102 ##__VA_ARGS__)
103
enum {
	MLX5_CMD_DATA, /* command payload */
	MLX5_CMD_TIME, /* command execution time */
};

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de, /* driver-generated syndrome value */
};

/* Addresses within the device semaphore space. */
enum mlx5_semaphore_space_address {
	MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA,
	MLX5_SEMAPHORE_SW_RESET = 0x20,
};
118
119int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
120int mlx5_query_board_id(struct mlx5_core_dev *dev);
121int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
122int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
123int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
124int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
125void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
126void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
127void mlx5_disable_device(struct mlx5_core_dev *dev);
128void mlx5_recover_device(struct mlx5_core_dev *dev);
129int mlx5_sriov_init(struct mlx5_core_dev *dev);
130void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
131int mlx5_sriov_attach(struct mlx5_core_dev *dev);
132void mlx5_sriov_detach(struct mlx5_core_dev *dev);
133int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
134int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
135int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
136int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
137 void *context, u32 *element_id);
138int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
139 void *context, u32 element_id,
140 u32 modify_bitmask);
141int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
142 u32 element_id);
143int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);
144u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
145 struct ptp_system_timestamp *sts);
146
147void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
148void mlx5_cmd_flush(struct mlx5_core_dev *dev);
149int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
150void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
151
152int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
153 u8 access_reg_group);
154int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
155 u8 access_reg_group);
156int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
157 u8 feature_group, u8 access_reg_group);
158
159void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
160void mlx5_lag_remove(struct mlx5_core_dev *dev);
161
162int mlx5_irq_table_init(struct mlx5_core_dev *dev);
163void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
164int mlx5_irq_table_create(struct mlx5_core_dev *dev);
165void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
166int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
167 struct notifier_block *nb);
168int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
169 struct notifier_block *nb);
170struct cpumask *
171mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
172struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
173int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
174
175int mlx5_events_init(struct mlx5_core_dev *dev);
176void mlx5_events_cleanup(struct mlx5_core_dev *dev);
177void mlx5_events_start(struct mlx5_core_dev *dev);
178void mlx5_events_stop(struct mlx5_core_dev *dev);
179
180void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
181void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
182void mlx5_attach_device(struct mlx5_core_dev *dev);
183void mlx5_detach_device(struct mlx5_core_dev *dev);
184bool mlx5_device_registered(struct mlx5_core_dev *dev);
185int mlx5_register_device(struct mlx5_core_dev *dev);
186void mlx5_unregister_device(struct mlx5_core_dev *dev);
187void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
188void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
189struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
190void mlx5_dev_list_lock(void);
191void mlx5_dev_list_unlock(void);
192int mlx5_dev_list_trylock(void);
193
194bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
195
196int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
197int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
198int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
199int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
200
201#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
202 MLX5_CAP_GEN((mdev), pps_modify) && \
203 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
204 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
205
206int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
207 struct netlink_ext_ack *extack);
208int mlx5_fw_version_query(struct mlx5_core_dev *dev,
209 u32 *running_ver, u32 *stored_ver);
210
211void mlx5e_init(void);
212void mlx5e_cleanup(void);
213
214static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
215{
216 return pci_num_vf(dev->pdev) ? true : false;
217}
218
219static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
220{
221
222
223
224
225
226 return MLX5_CAP_GEN(dev, vport_group_manager) &&
227 (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
228 MLX5_CAP_GEN(dev, lag_master);
229}
230
231void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
232void mlx5_lag_update(struct mlx5_core_dev *dev);
233
234enum {
235 MLX5_NIC_IFC_FULL = 0,
236 MLX5_NIC_IFC_DISABLED = 1,
237 MLX5_NIC_IFC_NO_DRAM_NIC = 2,
238 MLX5_NIC_IFC_SW_RESET = 7
239};
240
241u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
242void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
243#endif
244