// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
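
/* Illustrative use of the two helpers above, as done in
 * hclge_comm_get_stats() later in this file:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * HCLGE_MAC_STATS_FIELD_OFF() turns a field name into a byte offset into
 * struct hclge_mac_stats, and HCLGE_STATS_READ() dereferences that offset
 * as a u64 counter.
 */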

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

#define HCLGE_VF_VPORT_START_NUM	1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

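/* Default RSS hash key. This appears to be the widely used default
 * Toeplitz RSS key (the same 40-byte key documented for Microsoft RSS
 * and shipped as the default by several other NIC drivers).
 */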
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

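/* A worked example of the descriptor-count formula below (illustrative
 * numbers; the reading of the layout is inferred from the formula): if
 * the first descriptor effectively accounts for 3 of the stats registers
 * and each remaining descriptor packs 4 of them, then for reg_num = 803:
 *
 *	desc_num = 1 + ((803 - 3) >> 2) + ((800 & 0x3) ? 1 : 0) = 201
 *
 * where the last term rounds up when the register count does not divide
 * evenly into descriptors.
 */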
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);

		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index & 0x1ff);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);

		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index & 0x1ff);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has TX & RX two queues */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
				   HNAE3_SUPPORT_PHY_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports it
	 * serdes: supported in all mac modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

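/* A sketch of the MSI-X accounting done in hclge_query_pf_resource()
 * below, with hypothetical numbers: if firmware reports
 * roce_base_msix_offset = 65 and num_roce_msi = 64, the PF claims
 * num_msi = 64 + 65 = 129 vectors in total; the NIC vectors occupy the
 * slots before the RoCE base offset and the RoCE vectors start at slot
 * 65, matching the "NIC vectors are queued before the RoCE vectors"
 * comment in the function body.
 */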
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* The NIC's MSI-X vector number always equals the RoCE's */
		hdev->num_nic_msi = hdev->num_roce_msi;

		/* PF should have NIC vectors and RoCE vectors,
		 * and the NIC vectors are queued before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		hdev->num_nic_msi = hdev->num_msi;
	}

	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u MSI resources available, not enough for PF (min: 2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);

	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* param[3] carries the upper 16 bits of the MAC address; the
	 * double shift is equivalent to shifting left by 32
	 */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be retrieved
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equals to the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "invalid TC num = %u, using 1 TC.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently, non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the init affinity based on pci func number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

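/* Illustrative sizing for the rss_size clamps in hclge_assign_tqp()
 * below (hypothetical values): with rss_size_max = 16, 8 TQPs assigned
 * and num_tc = 4, the first clamp gives rss_size = min(16, 8 / 4) = 2;
 * the second clamp against (num_nic_msi - 1) / num_tc keeps one MSI
 * vector available per queue, e.g. num_nic_msi = 9 still allows
 * rss_size = (9 - 1) / 4 = 2.
 */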
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue at default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

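/* Encoding example for the TX buffer command built below (illustrative
 * size): a 16 KB (0x4000 byte) per-TC buffer is written as
 * (0x4000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) | HCLGE_BUF_SIZE_UPDATE_EN_MSK
 * = 0x80 | 0x8000 = 0x8080, i.e. the size expressed in 128-byte units
 * with the update-enable bit set.
 */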
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

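/* hclge_is_rx_buf_ok() checks whether rx_all (the total packet buffer
 * minus the TX allocations) can cover both the per-TC private buffers
 * already chosen and a minimum shared buffer, then derives the shared
 * buffer's high/low waterlines.  Rough illustrative arithmetic, assuming
 * DCB support, an aligned MPS of 1536 bytes and dv_buf_size of 8192
 * bytes: shared_buf_min = 2 * 1536 + 8192 = 11264 bytes, which is then
 * rounded to the 256-byte buffer unit before being compared against
 * what is left of rx_all.
 */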
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i)) {
			if (total_size < hdev->tx_buf_size)
				return -ENOMEM;

			priv->tx_buf_size = hdev->tx_buf_size;
		} else {
			priv->tx_buf_size = 0;
		}

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	unsigned int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
			priv->wl.high = roundup(priv->wl.low + aligned_mps,
						HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}

	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
1941
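/* hclge_drop_nopfc_buf_till_fit: starting from the last TC, release the
 * private buffers of TCs that have no PFC enabled until the rx buffer fits.
 */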
1942static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1943 struct hclge_pkt_buf_alloc *buf_alloc)
1944{
1945 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1946 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1947 int i;
1948
	/* let the last to be cleared first */
1950 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1951 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1952 unsigned int mask = BIT((unsigned int)i);
1953
1954 if (hdev->hw_tc_map & mask &&
1955 !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
1957 priv->wl.low = 0;
1958 priv->wl.high = 0;
1959 priv->buf_size = 0;
1960 priv->enable = 0;
1961 no_pfc_priv_num--;
1962 }
1963
1964 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1965 no_pfc_priv_num == 0)
1966 break;
1967 }
1968
1969 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1970}
1971
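/* hclge_drop_pfc_buf_till_fit: starting from the last TC, release the
 * private buffers of TCs that have PFC enabled until the rx buffer fits.
 */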
1972static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1973 struct hclge_pkt_buf_alloc *buf_alloc)
1974{
1975 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1976 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1977 int i;
1978
	/* let the last to be cleared first */
1980 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1981 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1982 unsigned int mask = BIT((unsigned int)i);
1983
1984 if (hdev->hw_tc_map & mask &&
1985 hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
1987 priv->wl.low = 0;
1988 priv->enable = 0;
1989 priv->wl.high = 0;
1990 priv->buf_size = 0;
1991 pfc_priv_num--;
1992 }
1993
1994 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1995 pfc_priv_num == 0)
1996 break;
1997 }
1998
1999 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2000}
2001
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2003 struct hclge_pkt_buf_alloc *buf_alloc)
2004{
2005#define COMPENSATE_BUFFER 0x3C00
2006#define COMPENSATE_HALF_MPS_NUM 5
2007#define PRIV_WL_GAP 0x1800
2008
2009 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2010 u32 tc_num = hclge_get_tc_num(hdev);
2011 u32 half_mps = hdev->mps >> 1;
2012 u32 min_rx_priv;
2013 unsigned int i;
2014
2015 if (tc_num)
2016 rx_priv = rx_priv / tc_num;
2017
2018 if (tc_num <= NEED_RESERVE_TC_NUM)
2019 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2020
2021 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2022 COMPENSATE_HALF_MPS_NUM * half_mps;
2023 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2024 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2025
2026 if (rx_priv < min_rx_priv)
2027 return false;
2028
2029 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2030 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2031
2032 priv->enable = 0;
2033 priv->wl.low = 0;
2034 priv->wl.high = 0;
2035 priv->buf_size = 0;
2036
2037 if (!(hdev->hw_tc_map & BIT(i)))
2038 continue;
2039
2040 priv->enable = 1;
2041 priv->buf_size = rx_priv;
2042 priv->wl.high = rx_priv - hdev->dv_buf_size;
2043 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2044 }
2045
2046 buf_alloc->s_buf.buf_size = 0;
2047
2048 return true;
2049}
2050
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
2056static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2057 struct hclge_pkt_buf_alloc *buf_alloc)
2058{
	/* When DCB is not supported, rx private buffer is not allocated. */
2060 if (!hnae3_dev_dcb_supported(hdev)) {
2061 u32 rx_all = hdev->pkt_buf_size;
2062
2063 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2064 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2065 return -ENOMEM;
2066
2067 return 0;
2068 }
2069
2070 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2071 return 0;
2072
2073 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2074 return 0;
2075
	/* try to decrease the buffer size */
2077 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2078 return 0;
2079
2080 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2081 return 0;
2082
2083 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2084 return 0;
2085
2086 return -ENOMEM;
2087}
2088
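/* hclge_rx_priv_buf_alloc: program the calculated per-TC private buffer
 * sizes and the shared buffer size into hardware.
 */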
2089static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2090 struct hclge_pkt_buf_alloc *buf_alloc)
2091{
2092 struct hclge_rx_priv_buff_cmd *req;
2093 struct hclge_desc desc;
2094 int ret;
2095 int i;
2096
2097 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2098 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2099
	/* Alloc private buffer TCs */
2101 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2102 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2103
2104 req->buf_num[i] =
2105 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2106 req->buf_num[i] |=
2107 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2108 }
2109
2110 req->shared_buf =
2111 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2112 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2113
2114 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2115 if (ret)
2116 dev_err(&hdev->pdev->dev,
2117 "rx private buffer alloc cmd failed %d\n", ret);
2118
2119 return ret;
2120}
2121
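/* hclge_rx_priv_wl_config: program the per-TC private buffer waterlines,
 * using two chained descriptors to cover all TCs.
 */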
2122static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2123 struct hclge_pkt_buf_alloc *buf_alloc)
2124{
2125 struct hclge_rx_priv_wl_buf *req;
2126 struct hclge_priv_buf *priv;
2127 struct hclge_desc desc[2];
2128 int i, j;
2129 int ret;
2130
2131 for (i = 0; i < 2; i++) {
2132 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2133 false);
2134 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2135
		/* The first descriptor set the NEXT bit to 1 */
2137 if (i == 0)
2138 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2139 else
2140 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2141
2142 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2143 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2144
2145 priv = &buf_alloc->priv_buf[idx];
2146 req->tc_wl[j].high =
2147 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2148 req->tc_wl[j].high |=
2149 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2150 req->tc_wl[j].low =
2151 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2152 req->tc_wl[j].low |=
2153 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154 }
2155 }
2156
	/* Send 2 descriptors at one time */
2158 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2159 if (ret)
2160 dev_err(&hdev->pdev->dev,
2161 "rx private waterline config cmd failed %d\n",
2162 ret);
2163 return ret;
2164}
2165
2166static int hclge_common_thrd_config(struct hclge_dev *hdev,
2167 struct hclge_pkt_buf_alloc *buf_alloc)
2168{
2169 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2170 struct hclge_rx_com_thrd *req;
2171 struct hclge_desc desc[2];
2172 struct hclge_tc_thrd *tc;
2173 int i, j;
2174 int ret;
2175
2176 for (i = 0; i < 2; i++) {
2177 hclge_cmd_setup_basic_desc(&desc[i],
2178 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2179 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2180
		/* The first descriptor set the NEXT bit to 1 */
2182 if (i == 0)
2183 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2184 else
2185 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2186
2187 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2188 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2189
2190 req->com_thrd[j].high =
2191 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2192 req->com_thrd[j].high |=
2193 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2194 req->com_thrd[j].low =
2195 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2196 req->com_thrd[j].low |=
2197 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 }
2199 }
2200
	/* Send 2 descriptors at one time */
2202 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2203 if (ret)
2204 dev_err(&hdev->pdev->dev,
2205 "common threshold config cmd failed %d\n", ret);
2206 return ret;
2207}
2208
2209static int hclge_common_wl_config(struct hclge_dev *hdev,
2210 struct hclge_pkt_buf_alloc *buf_alloc)
2211{
2212 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2213 struct hclge_rx_com_wl *req;
2214 struct hclge_desc desc;
2215 int ret;
2216
2217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2218
2219 req = (struct hclge_rx_com_wl *)desc.data;
2220 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2221 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2222
2223 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2224 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2225
2226 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2227 if (ret)
2228 dev_err(&hdev->pdev->dev,
2229 "common waterline config cmd failed %d\n", ret);
2230
2231 return ret;
2232}
2233
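/* hclge_buffer_alloc: calculate the tx/rx packet buffer layout for all TCs
 * and program the buffer sizes, waterlines and thresholds into hardware.
 */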
2234int hclge_buffer_alloc(struct hclge_dev *hdev)
2235{
2236 struct hclge_pkt_buf_alloc *pkt_buf;
2237 int ret;
2238
2239 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2240 if (!pkt_buf)
2241 return -ENOMEM;
2242
2243 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2244 if (ret) {
2245 dev_err(&hdev->pdev->dev,
2246 "could not calc tx buffer size for all TCs %d\n", ret);
2247 goto out;
2248 }
2249
2250 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2251 if (ret) {
2252 dev_err(&hdev->pdev->dev,
2253 "could not alloc tx buffers %d\n", ret);
2254 goto out;
2255 }
2256
2257 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2258 if (ret) {
2259 dev_err(&hdev->pdev->dev,
2260 "could not calc rx priv buffer size for all TCs %d\n",
2261 ret);
2262 goto out;
2263 }
2264
2265 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2266 if (ret) {
2267 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2268 ret);
2269 goto out;
2270 }
2271
2272 if (hnae3_dev_dcb_supported(hdev)) {
2273 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2274 if (ret) {
2275 dev_err(&hdev->pdev->dev,
2276 "could not configure rx private waterline %d\n",
2277 ret);
2278 goto out;
2279 }
2280
2281 ret = hclge_common_thrd_config(hdev, pkt_buf);
2282 if (ret) {
2283 dev_err(&hdev->pdev->dev,
2284 "could not configure common threshold %d\n",
2285 ret);
2286 goto out;
2287 }
2288 }
2289
2290 ret = hclge_common_wl_config(hdev, pkt_buf);
2291 if (ret)
2292 dev_err(&hdev->pdev->dev,
2293 "could not configure common waterline %d\n", ret);
2294
2295out:
2296 kfree(pkt_buf);
2297 return ret;
2298}
2299
2300static int hclge_init_roce_base_info(struct hclge_vport *vport)
2301{
2302 struct hnae3_handle *roce = &vport->roce;
2303 struct hnae3_handle *nic = &vport->nic;
2304
2305 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2306
2307 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2308 vport->back->num_msi_left == 0)
2309 return -EINVAL;
2310
2311 roce->rinfo.base_vector = vport->back->roce_base_vector;
2312
2313 roce->rinfo.netdev = nic->kinfo.netdev;
2314 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2315
2316 roce->pdev = nic->pdev;
2317 roce->ae_algo = nic->ae_algo;
2318 roce->numa_node_mask = nic->numa_node_mask;
2319
2320 return 0;
2321}
2322
2323static int hclge_init_msi(struct hclge_dev *hdev)
2324{
2325 struct pci_dev *pdev = hdev->pdev;
2326 int vectors;
2327 int i;
2328
2329 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2330 hdev->num_msi,
2331 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2332 if (vectors < 0) {
2333 dev_err(&pdev->dev,
2334 "failed(%d) to allocate MSI/MSI-X vectors\n",
2335 vectors);
2336 return vectors;
2337 }
2338 if (vectors < hdev->num_msi)
2339 dev_warn(&hdev->pdev->dev,
2340 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2341 hdev->num_msi, vectors);
2342
2343 hdev->num_msi = vectors;
2344 hdev->num_msi_left = vectors;
2345
2346 hdev->base_msi_vector = pdev->irq;
2347 hdev->roce_base_vector = hdev->base_msi_vector +
2348 hdev->roce_base_msix_offset;
2349
2350 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2351 sizeof(u16), GFP_KERNEL);
2352 if (!hdev->vector_status) {
2353 pci_free_irq_vectors(pdev);
2354 return -ENOMEM;
2355 }
2356
2357 for (i = 0; i < hdev->num_msi; i++)
2358 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2359
2360 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 sizeof(int), GFP_KERNEL);
2362 if (!hdev->vector_irq) {
2363 pci_free_irq_vectors(pdev);
2364 return -ENOMEM;
2365 }
2366
2367 return 0;
2368}
2369
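/* half duplex is only valid for 10M and 100M, force full duplex otherwise */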
2370static u8 hclge_check_speed_dup(u8 duplex, int speed)
2371{
2372 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2373 duplex = HCLGE_MAC_FULL;
2374
2375 return duplex;
2376}
2377
2378static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2379 u8 duplex)
2380{
2381 struct hclge_config_mac_speed_dup_cmd *req;
2382 struct hclge_desc desc;
2383 int ret;
2384
2385 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2386
2387 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2388
2389 if (duplex)
2390 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2391
2392 switch (speed) {
2393 case HCLGE_MAC_SPEED_10M:
2394 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2395 HCLGE_CFG_SPEED_S, 6);
2396 break;
2397 case HCLGE_MAC_SPEED_100M:
2398 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 HCLGE_CFG_SPEED_S, 7);
2400 break;
2401 case HCLGE_MAC_SPEED_1G:
2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 HCLGE_CFG_SPEED_S, 0);
2404 break;
2405 case HCLGE_MAC_SPEED_10G:
2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 HCLGE_CFG_SPEED_S, 1);
2408 break;
2409 case HCLGE_MAC_SPEED_25G:
2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 HCLGE_CFG_SPEED_S, 2);
2412 break;
2413 case HCLGE_MAC_SPEED_40G:
2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 HCLGE_CFG_SPEED_S, 3);
2416 break;
2417 case HCLGE_MAC_SPEED_50G:
2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 HCLGE_CFG_SPEED_S, 4);
2420 break;
2421 case HCLGE_MAC_SPEED_100G:
2422 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 HCLGE_CFG_SPEED_S, 5);
2424 break;
2425 default:
2426 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2427 return -EINVAL;
2428 }
2429
2430 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2431 1);
2432
2433 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2434 if (ret) {
2435 dev_err(&hdev->pdev->dev,
2436 "mac speed/duplex config cmd failed %d.\n", ret);
2437 return ret;
2438 }
2439
2440 return 0;
2441}
2442
2443int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2444{
2445 struct hclge_mac *mac = &hdev->hw.mac;
2446 int ret;
2447
2448 duplex = hclge_check_speed_dup(duplex, speed);
2449 if (!mac->support_autoneg && mac->speed == speed &&
2450 mac->duplex == duplex)
2451 return 0;
2452
2453 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2454 if (ret)
2455 return ret;
2456
2457 hdev->hw.mac.speed = speed;
2458 hdev->hw.mac.duplex = duplex;
2459
2460 return 0;
2461}
2462
2463static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2464 u8 duplex)
2465{
2466 struct hclge_vport *vport = hclge_get_vport(handle);
2467 struct hclge_dev *hdev = vport->back;
2468
2469 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2470}
2471
2472static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2473{
2474 struct hclge_config_auto_neg_cmd *req;
2475 struct hclge_desc desc;
2476 u32 flag = 0;
2477 int ret;
2478
2479 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2480
2481 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2482 if (enable)
2483 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2484 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2485
2486 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2487 if (ret)
2488 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2489 ret);
2490
2491 return ret;
2492}
2493
2494static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2495{
2496 struct hclge_vport *vport = hclge_get_vport(handle);
2497 struct hclge_dev *hdev = vport->back;
2498
2499 if (!hdev->hw.mac.support_autoneg) {
2500 if (enable) {
2501 dev_err(&hdev->pdev->dev,
2502 "autoneg is not supported by current port\n");
2503 return -EOPNOTSUPP;
2504 } else {
2505 return 0;
2506 }
2507 }
2508
2509 return hclge_set_autoneg_en(hdev, enable);
2510}
2511
2512static int hclge_get_autoneg(struct hnae3_handle *handle)
2513{
2514 struct hclge_vport *vport = hclge_get_vport(handle);
2515 struct hclge_dev *hdev = vport->back;
2516 struct phy_device *phydev = hdev->hw.mac.phydev;
2517
2518 if (phydev)
2519 return phydev->autoneg;
2520
2521 return hdev->hw.mac.autoneg;
2522}
2523
2524static int hclge_restart_autoneg(struct hnae3_handle *handle)
2525{
2526 struct hclge_vport *vport = hclge_get_vport(handle);
2527 struct hclge_dev *hdev = vport->back;
2528 int ret;
2529
2530 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2531
2532 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2533 if (ret)
2534 return ret;
2535 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2536}
2537
2538static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2539{
2540 struct hclge_vport *vport = hclge_get_vport(handle);
2541 struct hclge_dev *hdev = vport->back;
2542
2543 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2544 return hclge_set_autoneg_en(hdev, !halt);
2545
2546 return 0;
2547}
2548
2549static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2550{
2551 struct hclge_config_fec_cmd *req;
2552 struct hclge_desc desc;
2553 int ret;
2554
2555 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2556
2557 req = (struct hclge_config_fec_cmd *)desc.data;
2558 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2559 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2560 if (fec_mode & BIT(HNAE3_FEC_RS))
2561 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2562 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2563 if (fec_mode & BIT(HNAE3_FEC_BASER))
2564 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2565 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2566
2567 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568 if (ret)
2569 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2570
2571 return ret;
2572}
2573
2574static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2575{
2576 struct hclge_vport *vport = hclge_get_vport(handle);
2577 struct hclge_dev *hdev = vport->back;
2578 struct hclge_mac *mac = &hdev->hw.mac;
2579 int ret;
2580
2581 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2582 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2583 return -EINVAL;
2584 }
2585
2586 ret = hclge_set_fec_hw(hdev, fec_mode);
2587 if (ret)
2588 return ret;
2589
2590 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2591 return 0;
2592}
2593
2594static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2595 u8 *fec_mode)
2596{
2597 struct hclge_vport *vport = hclge_get_vport(handle);
2598 struct hclge_dev *hdev = vport->back;
2599 struct hclge_mac *mac = &hdev->hw.mac;
2600
2601 if (fec_ability)
2602 *fec_ability = mac->fec_ability;
2603 if (fec_mode)
2604 *fec_mode = mac->fec_mode;
2605}
2606
2607static int hclge_mac_init(struct hclge_dev *hdev)
2608{
2609 struct hclge_mac *mac = &hdev->hw.mac;
2610 int ret;
2611
2612 hdev->support_sfp_query = true;
2613 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2614 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2615 hdev->hw.mac.duplex);
2616 if (ret)
2617 return ret;
2618
2619 if (hdev->hw.mac.support_autoneg) {
2620 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2621 if (ret)
2622 return ret;
2623 }
2624
2625 mac->link = 0;
2626
2627 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2628 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2629 if (ret)
2630 return ret;
2631 }
2632
2633 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2634 if (ret) {
2635 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2636 return ret;
2637 }
2638
2639 ret = hclge_set_default_loopback(hdev);
2640 if (ret)
2641 return ret;
2642
2643 ret = hclge_buffer_alloc(hdev);
2644 if (ret)
2645 dev_err(&hdev->pdev->dev,
2646 "allocate buffer fail, ret=%d\n", ret);
2647
2648 return ret;
2649}
2650
2651static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2652{
2653 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2654 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2655 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2656 hclge_wq, &hdev->service_task, 0);
2657}
2658
2659static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2660{
2661 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2662 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2663 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2664 hclge_wq, &hdev->service_task, 0);
2665}
2666
2667void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2668{
2669 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2671 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2672 hclge_wq, &hdev->service_task,
2673 delay_time);
2674}
2675
2676static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2677{
2678 struct hclge_link_status_cmd *req;
2679 struct hclge_desc desc;
2680 int ret;
2681
2682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2683 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2684 if (ret) {
2685 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2686 ret);
2687 return ret;
2688 }
2689
2690 req = (struct hclge_link_status_cmd *)desc.data;
2691 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2692 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2693
2694 return 0;
2695}
2696
2697static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2698{
2699 struct phy_device *phydev = hdev->hw.mac.phydev;
2700
2701 *link_status = HCLGE_LINK_STATUS_DOWN;
2702
2703 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2704 return 0;
2705
2706 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2707 return 0;
2708
2709 return hclge_get_mac_link_status(hdev, link_status);
2710}
2711
2712static void hclge_update_link_status(struct hclge_dev *hdev)
2713{
2714 struct hnae3_client *rclient = hdev->roce_client;
2715 struct hnae3_client *client = hdev->nic_client;
2716 struct hnae3_handle *rhandle;
2717 struct hnae3_handle *handle;
2718 int state;
2719 int ret;
2720 int i;
2721
2722 if (!client)
2723 return;
2724
2725 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2726 return;
2727
2728 ret = hclge_get_mac_phy_link(hdev, &state);
2729 if (ret) {
2730 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2731 return;
2732 }
2733
2734 if (state != hdev->hw.mac.link) {
2735 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2736 handle = &hdev->vport[i].nic;
2737 client->ops->link_status_change(handle, state);
2738 hclge_config_mac_tnl_int(hdev, state);
2739 rhandle = &hdev->vport[i].roce;
2740 if (rclient && rclient->ops->link_status_change)
2741 rclient->ops->link_status_change(rhandle,
2742 state);
2743 }
2744 hdev->hw.mac.link = state;
2745 }
2746
2747 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2748}
2749
2750static void hclge_update_port_capability(struct hclge_mac *mac)
2751{
	/* update fec ability by speed */
2753 hclge_convert_setting_fec(mac);
2754
	/* firmware can not identify back plane type, the media type
	 * read from configuration can help deal with it
	 */
2758 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2759 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2760 mac->module_type = HNAE3_MODULE_TYPE_KR;
2761 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2762 mac->module_type = HNAE3_MODULE_TYPE_TP;
2763
2764 if (mac->support_autoneg) {
2765 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2766 linkmode_copy(mac->advertising, mac->supported);
2767 } else {
2768 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2769 mac->supported);
2770 linkmode_zero(mac->advertising);
2771 }
2772}
2773
2774static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2775{
2776 struct hclge_sfp_info_cmd *resp;
2777 struct hclge_desc desc;
2778 int ret;
2779
2780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2781 resp = (struct hclge_sfp_info_cmd *)desc.data;
2782 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2783 if (ret == -EOPNOTSUPP) {
2784 dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
2786 return ret;
2787 } else if (ret) {
2788 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2789 return ret;
2790 }
2791
2792 *speed = le32_to_cpu(resp->speed);
2793
2794 return 0;
2795}
2796
2797static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2798{
2799 struct hclge_sfp_info_cmd *resp;
2800 struct hclge_desc desc;
2801 int ret;
2802
2803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2804 resp = (struct hclge_sfp_info_cmd *)desc.data;
2805
2806 resp->query_type = QUERY_ACTIVE_SPEED;
2807
2808 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2809 if (ret == -EOPNOTSUPP) {
2810 dev_warn(&hdev->pdev->dev,
2811 "IMP does not support get SFP info %d\n", ret);
2812 return ret;
2813 } else if (ret) {
2814 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2815 return ret;
2816 }
2817
	/* In some cases, the mac speed got from IMP may be 0, it shouldn't
	 * be set to mac->speed.
	 */
2821 if (!le32_to_cpu(resp->speed))
2822 return 0;
2823
2824 mac->speed = le32_to_cpu(resp->speed);
2825
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware, do not update these params
	 */
2828 if (resp->speed_ability) {
2829 mac->module_type = le32_to_cpu(resp->module_type);
2830 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2831 mac->autoneg = resp->autoneg;
2832 mac->support_autoneg = resp->autoneg_ability;
2833 mac->speed_type = QUERY_ACTIVE_SPEED;
2834 if (!resp->active_fec)
2835 mac->fec_mode = 0;
2836 else
2837 mac->fec_mode = BIT(resp->active_fec);
2838 } else {
2839 mac->speed_type = QUERY_SFP_SPEED;
2840 }
2841
2842 return 0;
2843}
2844
2845static int hclge_update_port_info(struct hclge_dev *hdev)
2846{
2847 struct hclge_mac *mac = &hdev->hw.mac;
2848 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2849 int ret;
2850
	/* get the port info from SFP cmd if not copper port */
2852 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2853 return 0;
2854
	/* if IMP does not support get SFP/qSFP info, return directly */
2856 if (!hdev->support_sfp_query)
2857 return 0;
2858
2859 if (hdev->pdev->revision >= 0x21)
2860 ret = hclge_get_sfp_info(hdev, mac);
2861 else
2862 ret = hclge_get_sfp_speed(hdev, &speed);
2863
2864 if (ret == -EOPNOTSUPP) {
2865 hdev->support_sfp_query = false;
2866 return ret;
2867 } else if (ret) {
2868 return ret;
2869 }
2870
2871 if (hdev->pdev->revision >= 0x21) {
2872 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2873 hclge_update_port_capability(mac);
2874 return 0;
2875 }
2876 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2877 HCLGE_MAC_FULL);
2878 } else {
2879 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2880 return 0;
2881
		/* must config full duplex for SFP */
2883 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2884 }
2885}
2886
2887static int hclge_get_status(struct hnae3_handle *handle)
2888{
2889 struct hclge_vport *vport = hclge_get_vport(handle);
2890 struct hclge_dev *hdev = vport->back;
2891
2892 hclge_update_link_status(hdev);
2893
2894 return hdev->hw.mac.link;
2895}
2896
2897static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2898{
2899 if (!pci_num_vf(hdev->pdev)) {
2900 dev_err(&hdev->pdev->dev,
2901 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2902 return NULL;
2903 }
2904
2905 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2906 dev_err(&hdev->pdev->dev,
2907 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2908 vf, pci_num_vf(hdev->pdev));
2909 return NULL;
2910 }
2911
	/* VF start from 1 in vport */
2913 vf += HCLGE_VF_VPORT_START_NUM;
2914 return &hdev->vport[vf];
2915}
2916
2917static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2918 struct ifla_vf_info *ivf)
2919{
2920 struct hclge_vport *vport = hclge_get_vport(handle);
2921 struct hclge_dev *hdev = vport->back;
2922
2923 vport = hclge_get_vf_vport(hdev, vf);
2924 if (!vport)
2925 return -EINVAL;
2926
2927 ivf->vf = vf;
2928 ivf->linkstate = vport->vf_info.link_state;
2929 ivf->spoofchk = vport->vf_info.spoofchk;
2930 ivf->trusted = vport->vf_info.trusted;
2931 ivf->min_tx_rate = 0;
2932 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2933 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2934 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2935 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2936 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2937
2938 return 0;
2939}
2940
2941static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2942 int link_state)
2943{
2944 struct hclge_vport *vport = hclge_get_vport(handle);
2945 struct hclge_dev *hdev = vport->back;
2946
2947 vport = hclge_get_vf_vport(hdev, vf);
2948 if (!vport)
2949 return -EINVAL;
2950
2951 vport->vf_info.link_state = link_state;
2952
2953 return 0;
2954}
2955
2956static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2957{
2958 u32 cmdq_src_reg, msix_src_reg;
2959
	/* fetch the events from their corresponding regs */
2961 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2962 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2963
	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only deal with reset event in this go and will
	 * defer the processing of the mailbox events. Since, we would have not
	 * cleared RX CMDQ event this time we would receive again another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
2972 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2973 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2974 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2975 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2976 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2977 hdev->rst_stats.imp_rst_cnt++;
2978 return HCLGE_VECTOR0_EVENT_RST;
2979 }
2980
2981 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2982 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2983 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2984 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2985 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2986 hdev->rst_stats.global_rst_cnt++;
2987 return HCLGE_VECTOR0_EVENT_RST;
2988 }
2989
	/* check for vector0 msix event source */
2991 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2992 *clearval = msix_src_reg;
2993 return HCLGE_VECTOR0_EVENT_ERR;
2994 }
2995
	/* check for vector0 mailbox(=CMDQ RX) event source */
2997 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2998 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2999 *clearval = cmdq_src_reg;
3000 return HCLGE_VECTOR0_EVENT_MBX;
3001 }
3002
	/* print other vector0 event source */
3004 dev_info(&hdev->pdev->dev,
3005 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3006 cmdq_src_reg, msix_src_reg);
3007 *clearval = msix_src_reg;
3008
3009 return HCLGE_VECTOR0_EVENT_OTHER;
3010}
3011
3012static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3013 u32 regclr)
3014{
3015 switch (event_type) {
3016 case HCLGE_VECTOR0_EVENT_RST:
3017 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3018 break;
3019 case HCLGE_VECTOR0_EVENT_MBX:
3020 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3021 break;
3022 default:
3023 break;
3024 }
3025}
3026
3027static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3028{
3029 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3030 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3031 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3032 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3033 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3034}
3035
3036static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3037{
3038 writel(enable ? 1 : 0, vector->addr);
3039}
3040
3041static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3042{
3043 struct hclge_dev *hdev = data;
3044 u32 clearval = 0;
3045 u32 event_cause;
3046
3047 hclge_enable_vector(&hdev->misc_vector, false);
3048 event_cause = hclge_check_event_cause(hdev, &clearval);
3049
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3051 switch (event_cause) {
3052 case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the proper type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
3063 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3064 fallthrough;
3065 case HCLGE_VECTOR0_EVENT_RST:
3066 hclge_reset_task_schedule(hdev);
3067 break;
3068 case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
3078 hclge_mbx_task_schedule(hdev);
3079 break;
3080 default:
3081 dev_warn(&hdev->pdev->dev,
3082 "received unknown or unhandled event of vector0\n");
3083 break;
3084 }
3085
3086 hclge_clear_event_cause(hdev, event_cause, clearval);
3087
	/* Enable interrupt if it is not caused by reset. And when
	 * clearval equals 0, it means interrupt status may have been
	 * cleared by hardware before the driver reads the status register;
	 * for this case, vector0 interrupt should also be enabled.
	 */
	if (!clearval ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX)
		hclge_enable_vector(&hdev->misc_vector, true);
3097
3098 return IRQ_HANDLED;
3099}
3100
3101static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3102{
3103 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3104 dev_warn(&hdev->pdev->dev,
3105 "vector(vector_id %d) has been freed.\n", vector_id);
3106 return;
3107 }
3108
3109 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3110 hdev->num_msi_left += 1;
3111 hdev->num_msi_used -= 1;
3112}
3113
3114static void hclge_get_misc_vector(struct hclge_dev *hdev)
3115{
3116 struct hclge_misc_vector *vector = &hdev->misc_vector;
3117
3118 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3119
3120 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3121 hdev->vector_status[0] = 0;
3122
3123 hdev->num_msi_left -= 1;
3124 hdev->num_msi_used += 1;
3125}
3126
3127static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3128 const cpumask_t *mask)
3129{
3130 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3131 affinity_notify);
3132
3133 cpumask_copy(&hdev->affinity_mask, mask);
3134}
3135
3136static void hclge_irq_affinity_release(struct kref *ref)
3137{
3138}
3139
3140static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3141{
3142 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3143 &hdev->affinity_mask);
3144
3145 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3146 hdev->affinity_notify.release = hclge_irq_affinity_release;
3147 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3148 &hdev->affinity_notify);
3149}
3150
3151static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3152{
3153 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3154 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3155}
3156
3157static int hclge_misc_irq_init(struct hclge_dev *hdev)
3158{
3159 int ret;
3160
3161 hclge_get_misc_vector(hdev);
3162
	/* this would be explicitly freed in the end */
3164 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3165 HCLGE_NAME, pci_name(hdev->pdev));
3166 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3167 0, hdev->misc_vector.name, hdev);
3168 if (ret) {
3169 hclge_free_vector(hdev, 0);
3170 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3171 hdev->misc_vector.vector_irq);
3172 }
3173
3174 return ret;
3175}
3176
3177static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3178{
3179 free_irq(hdev->misc_vector.vector_irq, hdev);
3180 hclge_free_vector(hdev, 0);
3181}
3182
3183int hclge_notify_client(struct hclge_dev *hdev,
3184 enum hnae3_reset_notify_type type)
3185{
3186 struct hnae3_client *client = hdev->nic_client;
3187 u16 i;
3188
3189 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3190 return 0;
3191
3192 if (!client->ops->reset_notify)
3193 return -EOPNOTSUPP;
3194
3195 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3196 struct hnae3_handle *handle = &hdev->vport[i].nic;
3197 int ret;
3198
3199 ret = client->ops->reset_notify(handle, type);
3200 if (ret) {
3201 dev_err(&hdev->pdev->dev,
3202 "notify nic client failed %d(%d)\n", type, ret);
3203 return ret;
3204 }
3205 }
3206
3207 return 0;
3208}
3209
3210static int hclge_notify_roce_client(struct hclge_dev *hdev,
3211 enum hnae3_reset_notify_type type)
3212{
3213 struct hnae3_client *client = hdev->roce_client;
3214 int ret = 0;
3215 u16 i;
3216
3217 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3218 return 0;
3219
3220 if (!client->ops->reset_notify)
3221 return -EOPNOTSUPP;
3222
3223 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3224 struct hnae3_handle *handle = &hdev->vport[i].roce;
3225
3226 ret = client->ops->reset_notify(handle, type);
3227 if (ret) {
3228 dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)\n",
3230 type, ret);
3231 return ret;
3232 }
3233 }
3234
3235 return ret;
3236}
3237
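/* hclge_reset_wait: poll the reset status register of the current reset
 * type until hardware clears the reset bit, or time out after
 * HCLGE_RESET_WAIT_CNT polls.
 */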
3238static int hclge_reset_wait(struct hclge_dev *hdev)
3239{
#define HCLGE_RESET_WAIT_MS 100
3241#define HCLGE_RESET_WAIT_CNT 350
3242
3243 u32 val, reg, reg_bit;
3244 u32 cnt = 0;
3245
3246 switch (hdev->reset_type) {
3247 case HNAE3_IMP_RESET:
3248 reg = HCLGE_GLOBAL_RESET_REG;
3249 reg_bit = HCLGE_IMP_RESET_BIT;
3250 break;
3251 case HNAE3_GLOBAL_RESET:
3252 reg = HCLGE_GLOBAL_RESET_REG;
3253 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3254 break;
3255 case HNAE3_FUNC_RESET:
3256 reg = HCLGE_FUN_RST_ING;
3257 reg_bit = HCLGE_FUN_RST_ING_B;
3258 break;
3259 default:
3260 dev_err(&hdev->pdev->dev,
3261 "Wait for unsupported reset type: %d\n",
3262 hdev->reset_type);
3263 return -EINVAL;
3264 }
3265
3266 val = hclge_read_dev(&hdev->hw, reg);
3267 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3269 val = hclge_read_dev(&hdev->hw, reg);
3270 cnt++;
3271 }
3272
3273 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3274 dev_warn(&hdev->pdev->dev,
3275 "Wait for reset timeout: %d\n", hdev->reset_type);
3276 return -EBUSY;
3277 }
3278
3279 return 0;
3280}
3281
3282static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3283{
3284 struct hclge_vf_rst_cmd *req;
3285 struct hclge_desc desc;
3286
3287 req = (struct hclge_vf_rst_cmd *)desc.data;
3288 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3289 req->dest_vfid = func_id;
3290
3291 if (reset)
3292 req->vf_rst = 0x1;
3293
3294 return hclge_cmd_send(&hdev->hw, &desc, 1);
3295}
3296
3297static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3298{
3299 int i;
3300
3301 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3302 struct hclge_vport *vport = &hdev->vport[i];
3303 int ret;
3304
		/* Send cmd to set/clear VF's FUNC_RST_ING */
3306 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3307 if (ret) {
3308 dev_err(&hdev->pdev->dev,
3309 "set vf(%u) rst failed %d!\n",
3310 vport->vport_id, ret);
3311 return ret;
3312 }
3313
3314 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3315 continue;
3316
		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
3321 ret = hclge_inform_reset_assert_to_vf(vport);
3322 if (ret)
3323 dev_warn(&hdev->pdev->dev,
3324 "inform reset to vf(%u) failed %d!\n",
3325 vport->vport_id, ret);
3326 }
3327
3328 return 0;
3329}
3330
3331static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3332{
3333 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3334 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3335 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3336 return;
3337
3338 hclge_mbx_handler(hdev);
3339
3340 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3341}
3342
3343static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3344{
3345 struct hclge_pf_rst_sync_cmd *req;
3346 struct hclge_desc desc;
3347 int cnt = 0;
3348 int ret;
3349
3350 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3351 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3352
3353 do {
		/* vf need to down netdev by mbx during PF or FLR reset */
3355 hclge_mailbox_service_task(hdev);
3356
3357 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* to be compatible with old firmware, wait 100 ms for the
		 * VF to stop its queues
		 */
3361 if (ret == -EOPNOTSUPP) {
3362 msleep(HCLGE_RESET_SYNC_TIME);
3363 return;
3364 } else if (ret) {
3365 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3366 ret);
3367 return;
3368 } else if (req->all_vf_ready) {
3369 return;
3370 }
3371 msleep(HCLGE_PF_RESET_SYNC_TIME);
3372 hclge_cmd_reuse_desc(&desc, true);
3373 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3374
3375 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3376}
3377
3378void hclge_report_hw_error(struct hclge_dev *hdev,
3379 enum hnae3_hw_error_type type)
3380{
3381 struct hnae3_client *client = hdev->nic_client;
3382 u16 i;
3383
3384 if (!client || !client->ops->process_hw_error ||
3385 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3386 return;
3387
3388 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3389 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3390}
3391
3392static void hclge_handle_imp_error(struct hclge_dev *hdev)
3393{
3394 u32 reg_val;
3395
3396 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3397 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3398 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3399 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3400 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3401 }
3402
3403 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3404 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3405 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3406 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3407 }
3408}
3409
3410int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3411{
3412 struct hclge_desc desc;
3413 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3414 int ret;
3415
3416 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3417 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3418 req->fun_reset_vfid = func_id;
3419
3420 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3421 if (ret)
3422 dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);
3424
3425 return ret;
3426}
3427
3428static void hclge_do_reset(struct hclge_dev *hdev)
3429{
3430 struct hnae3_handle *handle = &hdev->vport[0].nic;
3431 struct pci_dev *pdev = hdev->pdev;
3432 u32 val;
3433
3434 if (hclge_get_hw_reset_stat(handle)) {
3435 dev_info(&pdev->dev, "hardware reset not finish\n");
3436 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3437 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3438 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3439 return;
3440 }
3441
3442 switch (hdev->reset_type) {
3443 case HNAE3_GLOBAL_RESET:
3444 dev_info(&pdev->dev, "global reset requested\n");
3445 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3446 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3447 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3448 break;
3449 case HNAE3_FUNC_RESET:
3450 dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
3452 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3453 hclge_reset_task_schedule(hdev);
3454 break;
3455 default:
3456 dev_warn(&pdev->dev,
3457 "unsupported reset type: %d\n", hdev->reset_type);
3458 break;
3459 }
3460}
3461
3462static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3463 unsigned long *addr)
3464{
3465 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3466 struct hclge_dev *hdev = ae_dev->priv;
3467
	/* first, resolve any unknown reset type to the known type(s) */
3469 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3470 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3471 HCLGE_MISC_VECTOR_INT_STS);
3472
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
3475 if (hclge_handle_hw_msix_error(hdev, addr))
3476 dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3477 msix_sts_reg);
3478
3479 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3480
		/* We deferred clearing the error event which caused the
		 * interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have
		 * been handled and cleared in hardware, we can safely
		 * enable interrupts. This is an exception to the norm.
		 */
3487 hclge_enable_vector(&hdev->misc_vector, true);
3488 }
3489
	/* return the highest priority reset level amongst all */
3491 if (test_bit(HNAE3_IMP_RESET, addr)) {
3492 rst_level = HNAE3_IMP_RESET;
3493 clear_bit(HNAE3_IMP_RESET, addr);
3494 clear_bit(HNAE3_GLOBAL_RESET, addr);
3495 clear_bit(HNAE3_FUNC_RESET, addr);
3496 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3497 rst_level = HNAE3_GLOBAL_RESET;
3498 clear_bit(HNAE3_GLOBAL_RESET, addr);
3499 clear_bit(HNAE3_FUNC_RESET, addr);
3500 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3501 rst_level = HNAE3_FUNC_RESET;
3502 clear_bit(HNAE3_FUNC_RESET, addr);
3503 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3504 rst_level = HNAE3_FLR_RESET;
3505 clear_bit(HNAE3_FLR_RESET, addr);
3506 }
3507
3508 if (hdev->reset_type != HNAE3_NONE_RESET &&
3509 rst_level < hdev->reset_type)
3510 return HNAE3_NONE_RESET;
3511
3512 return rst_level;
3513}
3514
3515static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3516{
3517 u32 clearval = 0;
3518
3519 switch (hdev->reset_type) {
3520 case HNAE3_IMP_RESET:
3521 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3522 break;
3523 case HNAE3_GLOBAL_RESET:
3524 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3525 break;
3526 default:
3527 break;
3528 }
3529
3530 if (!clearval)
3531 return;
3532
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after hardware reset done
	 */
3536 if (hdev->pdev->revision == 0x20)
3537 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3538 clearval);
3539
3540 hclge_enable_vector(&hdev->misc_vector, true);
3541}
3542
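/* set or clear the software reset-ready flag, which tells the firmware
 * whether the driver side is prepared for the hardware reset to proceed
 */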
3543static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3544{
3545 u32 reg_val;
3546
3547 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3548 if (enable)
3549 reg_val |= HCLGE_NIC_SW_RST_RDY;
3550 else
3551 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3552
3553 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3554}
3555
3556static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3557{
3558 int ret;
3559
3560 ret = hclge_set_all_vf_rst(hdev, true);
3561 if (ret)
3562 return ret;
3563
3564 hclge_func_reset_sync_vf(hdev);
3565
3566 return 0;
3567}
3568
3569static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3570{
3571 u32 reg_val;
3572 int ret = 0;
3573
3574 switch (hdev->reset_type) {
3575 case HNAE3_FUNC_RESET:
3576 ret = hclge_func_reset_notify_vf(hdev);
3577 if (ret)
3578 return ret;
3579
3580 ret = hclge_func_reset_cmd(hdev, 0);
3581 if (ret) {
3582 dev_err(&hdev->pdev->dev,
3583 "asserting function reset fail %d!\n", ret);
3584 return ret;
3585 }
3586
		/* After performing pf reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3592 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3593 hdev->rst_stats.pf_rst_cnt++;
3594 break;
3595 case HNAE3_FLR_RESET:
3596 ret = hclge_func_reset_notify_vf(hdev);
3597 if (ret)
3598 return ret;
3599 break;
3600 case HNAE3_IMP_RESET:
3601 hclge_handle_imp_error(hdev);
3602 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3603 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3604 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3605 break;
3606 default:
3607 break;
3608 }
3609
	/* inform hardware that preparatory work is done */
3611 msleep(HCLGE_RESET_SYNC_TIME);
3612 hclge_reset_handshake(hdev, true);
3613 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3614
3615 return ret;
3616}
3617
3618static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3619{
3620#define MAX_RESET_FAIL_CNT 5
3621
3622 if (hdev->reset_pending) {
3623 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3624 hdev->reset_pending);
3625 return true;
3626 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3627 HCLGE_RESET_INT_M) {
3628 dev_info(&hdev->pdev->dev,
3629 "reset failed because new reset interrupt\n");
3630 hclge_clear_reset_cause(hdev);
3631 return false;
3632 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3633 hdev->rst_stats.reset_fail_cnt++;
3634 set_bit(hdev->reset_type, &hdev->reset_pending);
3635 dev_info(&hdev->pdev->dev,
3636 "re-schedule reset task(%u)\n",
3637 hdev->rst_stats.reset_fail_cnt);
3638 return true;
3639 }
3640
3641 hclge_clear_reset_cause(hdev);
3642
	/* recover the handshake status when reset fails */
3644 hclge_reset_handshake(hdev, true);
3645
3646 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3647
3648 hclge_dbg_dump_rst_info(hdev);
3649
3650 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3651
3652 return false;
3653}
3654
3655static int hclge_set_rst_done(struct hclge_dev *hdev)
3656{
3657 struct hclge_pf_rst_done_cmd *req;
3658 struct hclge_desc desc;
3659 int ret;
3660
3661 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3662 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3663 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3664
3665 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3666
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
3670 if (ret == -EOPNOTSUPP) {
3671 dev_warn(&hdev->pdev->dev,
3672 "current firmware does not support command(0x%x)!\n",
3673 HCLGE_OPC_PF_RST_DONE);
3674 return 0;
3675 } else if (ret) {
3676 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3677 ret);
3678 }
3679
3680 return ret;
3681}
3682
3683static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3684{
3685 int ret = 0;
3686
3687 switch (hdev->reset_type) {
3688 case HNAE3_FUNC_RESET:
3689 case HNAE3_FLR_RESET:
3690 ret = hclge_set_all_vf_rst(hdev, false);
3691 break;
3692 case HNAE3_GLOBAL_RESET:
3693 case HNAE3_IMP_RESET:
3694 ret = hclge_set_rst_done(hdev);
3695 break;
3696 default:
3697 break;
3698 }
3699
	/* clear up the handshake status after hw reset is done */
3701 hclge_reset_handshake(hdev, false);
3702
3703 return ret;
3704}
3705
3706static int hclge_reset_stack(struct hclge_dev *hdev)
3707{
3708 int ret;
3709
3710 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3711 if (ret)
3712 return ret;
3713
3714 ret = hclge_reset_ae_dev(hdev->ae_dev);
3715 if (ret)
3716 return ret;
3717
3718 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3719}
3720
3721static int hclge_reset_prepare(struct hclge_dev *hdev)
3722{
3723 int ret;
3724
3725 hdev->rst_stats.reset_cnt++;
3726
3727 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3728 if (ret)
3729 return ret;
3730
3731 rtnl_lock();
3732 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3733 rtnl_unlock();
3734 if (ret)
3735 return ret;
3736
3737 return hclge_reset_prepare_wait(hdev);
3738}
3739
3740static int hclge_reset_rebuild(struct hclge_dev *hdev)
3741{
3742 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3743 enum hnae3_reset_type reset_level;
3744 int ret;
3745
3746 hdev->rst_stats.hw_reset_done_cnt++;
3747
3748 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3749 if (ret)
3750 return ret;
3751
3752 rtnl_lock();
3753 ret = hclge_reset_stack(hdev);
3754 rtnl_unlock();
3755 if (ret)
3756 return ret;
3757
3758 hclge_clear_reset_cause(hdev);
3759
3760 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3761
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
3764 if (ret &&
3765 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3766 return ret;
3767
3768 ret = hclge_reset_prepare_up(hdev);
3769 if (ret)
3770 return ret;
3771
3772 rtnl_lock();
3773 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3774 rtnl_unlock();
3775 if (ret)
3776 return ret;
3777
3778 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3779 if (ret)
3780 return ret;
3781
3782 hdev->last_reset_time = jiffies;
3783 hdev->rst_stats.reset_fail_cnt = 0;
3784 hdev->rst_stats.reset_done_cnt++;
3785 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3786
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
3791 reset_level = hclge_get_reset_level(ae_dev,
3792 &hdev->default_reset_request);
3793 if (reset_level != HNAE3_NONE_RESET)
3794 set_bit(reset_level, &hdev->reset_request);
3795
3796 return 0;
3797}
3798
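/* hclge_reset: run the whole reset flow: quiesce the clients and assert the
 * reset, wait for hardware to finish, then rebuild the stack and bring the
 * clients back up; on failure, let the error handler decide on a retry.
 */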
3799static void hclge_reset(struct hclge_dev *hdev)
3800{
3801 if (hclge_reset_prepare(hdev))
3802 goto err_reset;
3803
3804 if (hclge_reset_wait(hdev))
3805 goto err_reset;
3806
3807 if (hclge_reset_rebuild(hdev))
3808 goto err_reset;
3809
3810 return;
3811
3812err_reset:
3813 if (hclge_reset_err_handle(hdev))
3814 hclge_reset_task_schedule(hdev);
3815}
3816
3817static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3818{
3819 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3820 struct hclge_dev *hdev = ae_dev->priv;
3821
	/* This function gets called both by the upper layer (e.g. on tx
	 * timeout) and by our own reset timer, so throttle the requests:
	 * if the last reset completed only recently, just re-arm the reset
	 * timer and return. Otherwise pick the reset level either from a
	 * pending default reset request or, if the device has been quiet
	 * for a long period, fall back to a function level reset. Each
	 * handled event also escalates the level for the next request, up
	 * to a global reset, so repeated failures end up in a more
	 * thorough reset.
	 */
3837 if (!handle)
3838 handle = &hdev->vport[0].nic;
3839
3840 if (time_before(jiffies, (hdev->last_reset_time +
3841 HCLGE_RESET_INTERVAL))) {
3842 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3843 return;
3844 } else if (hdev->default_reset_request) {
3845 hdev->reset_level =
3846 hclge_get_reset_level(ae_dev,
3847 &hdev->default_reset_request);
3848 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3849 hdev->reset_level = HNAE3_FUNC_RESET;
3850 }
3851
3852 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3853 hdev->reset_level);
3854
	/* request reset & schedule reset task */
3856 set_bit(hdev->reset_level, &hdev->reset_request);
3857 hclge_reset_task_schedule(hdev);
3858
3859 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3860 hdev->reset_level++;
3861}
3862
3863static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3864 enum hnae3_reset_type rst_type)
3865{
3866 struct hclge_dev *hdev = ae_dev->priv;
3867
3868 set_bit(rst_type, &hdev->default_reset_request);
3869}
3870
3871static void hclge_reset_timer(struct timer_list *t)
3872{
3873 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3874
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
3878 if (!hdev->default_reset_request)
3879 return;
3880
3881 dev_info(&hdev->pdev->dev,
3882 "triggering reset in reset timer\n");
3883 hclge_reset_event(hdev->pdev, NULL);
3884}
3885
3886static void hclge_reset_subtask(struct hclge_dev *hdev)
3887{
3888 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3889
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
3899 hdev->last_reset_time = jiffies;
3900 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3901 if (hdev->reset_type != HNAE3_NONE_RESET)
3902 hclge_reset(hdev);
3903
	/* check if we got any *new* reset requests to be honored */
3905 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3906 if (hdev->reset_type != HNAE3_NONE_RESET)
3907 hclge_do_reset(hdev);
3908
3909 hdev->reset_type = HNAE3_NONE_RESET;
3910}
3911
3912static void hclge_reset_service_task(struct hclge_dev *hdev)
3913{
3914 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3915 return;
3916
3917 down(&hdev->reset_sem);
3918 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3919
3920 hclge_reset_subtask(hdev);
3921
3922 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3923 up(&hdev->reset_sem);
3924}
3925
3926static void hclge_update_vport_alive(struct hclge_dev *hdev)
3927{
3928 int i;
3929
	/* start from vport 1 for PF is always alive */
3931 for (i = 1; i < hdev->num_alloc_vport; i++) {
3932 struct hclge_vport *vport = &hdev->vport[i];
3933
3934 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3935 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3936
		/* If vf is not alive, set to default value */
3938 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3939 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3940 }
3941}
3942
3943static void hclge_periodic_service_task(struct hclge_dev *hdev)
3944{
3945 unsigned long delta = round_jiffies_relative(HZ);
3946
	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
3950 hclge_update_link_status(hdev);
3951 hclge_sync_mac_table(hdev);
3952 hclge_sync_promisc_mode(hdev);
3953
3954 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3955 delta = jiffies - hdev->last_serv_processed;
3956
3957 if (delta < round_jiffies_relative(HZ)) {
3958 delta = round_jiffies_relative(HZ) - delta;
3959 goto out;
3960 }
3961 }
3962
3963 hdev->serv_processed_cnt++;
3964 hclge_update_vport_alive(hdev);
3965
3966 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3967 hdev->last_serv_processed = jiffies;
3968 goto out;
3969 }
3970
3971 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3972 hclge_update_stats_for_all(hdev);
3973
3974 hclge_update_port_info(hdev);
3975 hclge_sync_vlan_filter(hdev);
3976
3977 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3978 hclge_rfs_filter_expire(hdev);
3979
3980 hdev->last_serv_processed = jiffies;
3981
3982out:
3983 hclge_task_schedule(hdev, delta);
3984}
3985
3986static void hclge_service_task(struct work_struct *work)
3987{
3988 struct hclge_dev *hdev =
3989 container_of(work, struct hclge_dev, service_task.work);
3990
3991 hclge_reset_service_task(hdev);
3992 hclge_mailbox_service_task(hdev);
3993 hclge_periodic_service_task(hdev);
3994
	/* Handle reset and mbx again in case periodical task delays the
	 * handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
3999 hclge_reset_service_task(hdev);
4000 hclge_mailbox_service_task(hdev);
4001}
4002
4003struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4004{
4005
4006 if (!handle->client)
4007 return container_of(handle, struct hclge_vport, nic);
4008 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4009 return container_of(handle, struct hclge_vport, roce);
4010 else
4011 return container_of(handle, struct hclge_vport, nic);
4012}
4013
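/* hclge_get_vector: hand out up to @vector_num unused MSI-X vectors to the
 * requesting vport; vector 0 is reserved for the misc interrupt, so the
 * search starts from vector 1.
 */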
4014static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4015 struct hnae3_vector_info *vector_info)
4016{
4017 struct hclge_vport *vport = hclge_get_vport(handle);
4018 struct hnae3_vector_info *vector = vector_info;
4019 struct hclge_dev *hdev = vport->back;
4020 int alloc = 0;
4021 int i, j;
4022
4023 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4024 vector_num = min(hdev->num_msi_left, vector_num);
4025
4026 for (j = 0; j < vector_num; j++) {
4027 for (i = 1; i < hdev->num_msi; i++) {
4028 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4029 vector->vector = pci_irq_vector(hdev->pdev, i);
4030 vector->io_addr = hdev->hw.io_base +
4031 HCLGE_VECTOR_REG_BASE +
4032 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4033 vport->vport_id *
4034 HCLGE_VECTOR_VF_OFFSET;
4035 hdev->vector_status[i] = vport->vport_id;
4036 hdev->vector_irq[i] = vector->vector;
4037
4038 vector++;
4039 alloc++;
4040
4041 break;
4042 }
4043 }
4044 }
4045 hdev->num_msi_left -= alloc;
4046 hdev->num_msi_used += alloc;
4047
4048 return alloc;
4049}
4050
4051static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4052{
4053 int i;
4054
4055 for (i = 0; i < hdev->num_msi; i++)
4056 if (vector == hdev->vector_irq[i])
4057 return i;
4058
4059 return -EINVAL;
4060}
4061
4062static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4063{
4064 struct hclge_vport *vport = hclge_get_vport(handle);
4065 struct hclge_dev *hdev = vport->back;
4066 int vector_id;
4067
4068 vector_id = hclge_get_vector_index(hdev, vector);
4069 if (vector_id < 0) {
4070 dev_err(&hdev->pdev->dev,
4071 "Get vector index fail. vector = %d\n", vector);
4072 return vector_id;
4073 }
4074
4075 hclge_free_vector(hdev, vector_id);
4076
4077 return 0;
4078}
4079
4080static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4081{
4082 return HCLGE_RSS_KEY_SIZE;
4083}
4084
4085static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4086{
4087 return HCLGE_RSS_IND_TBL_SIZE;
4088}
4089
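/* Program the RSS hash algorithm and hash key into hardware. The key is
 * written HCLGE_RSS_HASH_KEY_NUM bytes at a time, one command descriptor
 * per chunk, with the chunk index carried in the hash_config field.
 */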
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
				  const u8 hfunc, const u8 *key)
{
	struct hclge_rss_config_cmd *req;
	unsigned int key_offset = 0;
	struct hclge_desc desc;
	int key_counts;
	int key_size;
	int ret;

	key_counts = HCLGE_RSS_KEY_SIZE;
	req = (struct hclge_rss_config_cmd *)desc.data;

	while (key_counts) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
					   false);

		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);

		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
		memcpy(req->hash_key,
		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);

		key_counts -= key_size;
		key_offset++;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

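/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor. Each entry maps one hash bucket to a
 * queue within the vport's allocated RSS size.
 */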
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
{
	struct hclge_rss_indirection_table_cmd *req;
	struct hclge_desc desc;
	int i, j;
	int ret;

	req = (struct hclge_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
					   false);

		req->start_table_index =
			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);

		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
				ret);
			return ret;
		}
	}
	return 0;
}

static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
				 u16 *tc_size, u16 *tc_offset)
{
	struct hclge_rss_tc_mode_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
	req = (struct hclge_rss_tc_mode_cmd *)desc.data;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u16 mode = 0;

		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);

		req->rss_tc_mode[i] = cpu_to_le16(mode);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss tc mode fail, status = %d\n", ret);

	return ret;
}

static void hclge_get_rss_type(struct hclge_vport *vport)
{
	if (vport->rss_tuple_sets.ipv4_tcp_en ||
	    vport->rss_tuple_sets.ipv4_udp_en ||
	    vport->rss_tuple_sets.ipv4_sctp_en ||
	    vport->rss_tuple_sets.ipv6_tcp_en ||
	    vport->rss_tuple_sets.ipv6_udp_en ||
	    vport->rss_tuple_sets.ipv6_sctp_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
		 vport->rss_tuple_sets.ipv6_fragment_en)
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
	else
		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}

static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;

	/* Get the tuple cfg from pf */
	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
	hclge_get_rss_type(&hdev->vport[0]);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	int i;

	/* Get hash algorithm */
	if (hfunc) {
		switch (vport->rss_algo) {
		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
			*hfunc = ETH_RSS_HASH_TOP;
			break;
		case HCLGE_RSS_HASH_ALGO_SIMPLE:
			*hfunc = ETH_RSS_HASH_XOR;
			break;
		default:
			*hfunc = ETH_RSS_HASH_UNKNOWN;
			break;
		}
	}

	/* Get the RSS Key required by the user */
	if (key)
		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);

	/* Get indirection table */
	if (indir)
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 hash_algo;
	int ret, i;

	/* Set the RSS Hash Key if specified by the user */
	if (key) {
		switch (hfunc) {
		case ETH_RSS_HASH_TOP:
			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
			break;
		case ETH_RSS_HASH_XOR:
			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
			break;
		case ETH_RSS_HASH_NO_CHANGE:
			hash_algo = vport->rss_algo;
			break;
		default:
			return -EINVAL;
		}

		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
		if (ret)
			return ret;

		/* Update the shadow RSS key with the user specified key */
		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
		vport->rss_algo = hash_algo;
	}

	/* Update the shadow RSS table with the user specified qids */
	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		vport->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}

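/* Translate the ethtool RXH_* hash flags into the driver's tuple bits.
 * For example, "ethtool -N <dev> rx-flow-hash tcp4 sdfn" sets RXH_IP_SRC,
 * RXH_IP_DST, RXH_L4_B_0_1 and RXH_L4_B_2_3, which map to hashing on
 * source/destination IP and source/destination L4 port here. For SCTP
 * flows, HCLGE_V_TAG_BIT is set in addition (presumably the SCTP
 * verification tag).
 */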
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGE_D_PORT_BIT;
	else
		hash_sets &= ~HCLGE_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGE_S_IP_BIT;
	else
		hash_sets &= ~HCLGE_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGE_D_IP_BIT;
	else
		hash_sets &= ~HCLGE_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGE_V_TAG_BIT;

	return hash_sets;
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_rss_input_tuple_cmd *req;
	struct hclge_desc desc;
	u8 tuple_sets;
	int ret;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclge_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	hclge_get_rss_type(vport);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGE_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGE_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGE_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGE_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->rss_size_max;
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u8 *rss_indir = vport[0].rss_indirection_tbl;
	u16 rss_size = vport[0].alloc_rss_size;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u8 *key = vport[0].rss_hash_key;
	u8 hfunc = vport[0].rss_algo;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;
	int ret;

	ret = hclge_set_rss_indir_table(hdev, rss_indir);
	if (ret)
		return ret;

	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_set_rss_input_tuple(hdev);
	if (ret)
		return ret;

	/* Each TC has the same queue size, and the tc_size set to hardware
	 * is the log2 of the roundup power of two of rss_size; the actual
	 * queue size is limited by the indirection table.
	 */
	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
		dev_err(&hdev->pdev->dev,
			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
			rss_size);
		return -EINVAL;
	}

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
}

void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i, j;

	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			vport[j].rss_indirection_tbl[i] =
				i % vport[j].alloc_rss_size;
	}
}

static void hclge_rss_init_cfg(struct hclge_dev *hdev)
{
	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
	struct hclge_vport *vport = hdev->vport;

	if (hdev->pdev->revision >= 0x21)
		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport[i].rss_tuple_sets.ipv4_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv4_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv4_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_tcp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_udp_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;
		vport[i].rss_tuple_sets.ipv6_sctp_en =
			HCLGE_RSS_INPUT_TUPLE_SCTP;
		vport[i].rss_tuple_sets.ipv6_fragment_en =
			HCLGE_RSS_INPUT_TUPLE_OTHER;

		vport[i].rss_algo = rss_algo;

		memcpy(vport[i].rss_hash_key, hclge_hash_key,
		       HCLGE_RSS_KEY_SIZE);
	}

	hclge_rss_indir_init_cfg(hdev);
}

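/* Bind (en == true) or unbind a chain of TQP rings to/from an interrupt
 * vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries are packed
 * into one command descriptor; longer chains are flushed to hardware and
 * the descriptor is reinitialized as needed.
 */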
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id = vector_id;

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc, op, false);
			req->int_vector_id = vector_id;
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
				      struct hclge_promisc_param *param)
{
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = param->vf_id;

	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are always set:
	 * the uc/mc/bc selection is carried in param->enable, while the
	 * tx/rx promisc bits themselves are not configurable by the user.
	 */
	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %d promisc mode, ret = %d.\n",
			param->vf_id, ret);

	return ret;
}

static void hclge_promisc_param_init(struct hclge_promisc_param *param,
				     bool en_uc, bool en_mc, bool en_bc,
				     int vport_id)
{
	if (!param)
		return;

	memset(param, 0, sizeof(struct hclge_promisc_param));
	if (en_uc)
		param->enable = HCLGE_PROMISC_EN_UC;
	if (en_mc)
		param->enable |= HCLGE_PROMISC_EN_MC;
	if (en_bc)
		param->enable |= HCLGE_PROMISC_EN_BC;
	param->vf_id = vport_id;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_promisc_param param;

	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
				 vport->vport_id);
	return hclge_cmd_set_promisc_mode(hdev, &param);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan
	 * filter is always bypassed. So broadcast promisc should be
	 * disabled until the user enables promisc mode.
	 */
	if (handle->pdev->revision == 0x20)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If the max 400bit key is used, we can support MAC tuples */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);

	/* roce_type is used to filter roce frames,
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

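/* Write one TCAM entry (the key x or key y half, selected by sel_x) for a
 * flow director rule. The key is spread across three chained command
 * descriptors; passing a NULL key only updates the valid bit, which is
 * how entries are invalidated on delete.
 */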
static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

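/* Convert one rule tuple into its TCAM x/y representation via the driver's
 * calc_x()/calc_y() helpers, which encode the value/mask pair. Returns
 * true if the tuple occupies space in the key (even when it is unused, so
 * the caller advances past its region, leaving it zero), and false if the
 * tuple bit is not recognized.
 */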
static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case BIT(INNER_DST_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);

		return true;
	case BIT(INNER_IP_PROTO):
		calc_x(*key_x, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);
		calc_y(*key_y, rule->tuples.ip_proto,
		       rule->tuples_mask.ip_proto);

		return true;
	case BIT(INNER_SRC_IP):
		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
		       rule->tuples_mask.src_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_DST_IP):
		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case BIT(INNER_SRC_PORT):
		calc_x(tmp_x_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		calc_y(tmp_y_s, rule->tuples.src_port,
		       rule->tuples_mask.src_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_DST_PORT):
		calc_x(tmp_x_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		calc_y(tmp_y_s, rule->tuples.dst_port,
		       rule->tuples_mask.dst_port);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

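/* Pack the active meta data fields (packet type, destination vport) into
 * the meta data portion of the TCAM key. The accumulated bits are shifted
 * up so that the meta data ends up MSB-aligned within its 32-bit word.
 */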
static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits are filled with zero.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;
		u32 check_tuple;

		tuple_size = tuple_key_info[i].key_length / 8;
		check_tuple = key_cfg->tuple_active & BIT(i);

		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_fd_ad_data ad_data;

	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
		ad_data.forward_to_direct_queue = false;
		ad_data.queue_id = 0;
	} else {
		ad_data.drop_packet = false;
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	ad_data.use_counter = false;
	ad_data.counter_id = 0;

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

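/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and record which tuples the user left unspecified in *unused_tuple;
 * those tuples are later masked out of the TCAM key. A zero field means
 * "don't care", not "match zero".
 */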
static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (!spec->proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	if (spec->ip_ver != ETH_RX_NFC_IP4)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_IP_TOS);

	/* check whether src/dst ip address is used */
	if (!spec->ip6src[0] && !spec->ip6src[1] &&
	    !spec->ip6src[2] && !spec->ip6src[3])
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
	    !spec->ip6dst[2] && !spec->ip6dst[3])
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (spec->tclass)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* check whether src/dst ip address is used */
	if (!spec->ip6src[0] && !spec->ip6src[1] &&
	    !spec->ip6src[2] && !spec->ip6src[3])
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
	    !spec->ip6dst[2] && !spec->ip6dst[3])
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->l4_proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->tclass)
		return -EOPNOTSUPP;

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

	if (is_zero_ether_addr(spec->h_source))
		*unused_tuple |= BIT(INNER_SRC_MAC);

	if (is_zero_ether_addr(spec->h_dest))
		*unused_tuple |= BIT(INNER_DST_MAC);

	if (!spec->h_proto)
		*unused_tuple |= BIT(INNER_ETH_TYPE);

	return 0;
}

static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
				    struct ethtool_rx_flow_spec *fs,
				    u32 *unused_tuple)
{
	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype) {
			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
			return -EOPNOTSUPP;
		}

		if (!fs->h_ext.vlan_tci)
			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci &&
		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
			dev_err(&hdev->pdev->dev,
				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
			return -EINVAL;
		}
	} else {
		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused_tuple |= BIT(INNER_DST_MAC);
		else
			*unused_tuple &= ~BIT(INNER_DST_MAC);
	}

	return 0;
}

static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs,
			       u32 *unused_tuple)
{
	u32 flow_type;
	int ret;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u\n",
			fs->location,
			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
						  unused_tuple);
		break;
	case IP_USER_FLOW:
		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
					       unused_tuple);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
						  unused_tuple);
		break;
	case IPV6_USER_FLOW:
		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
					       unused_tuple);
		break;
	case ETHER_FLOW:
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"ETHER_FLOW is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
						 unused_tuple);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported protocol type, protocol type = %#x\n",
			flow_type);
		return -EOPNOTSUPP;
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check flow union tuple, ret = %d\n",
			ret);
		return ret;
	}

	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}

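/* The fd rule list is kept sorted by rule location, so lookup and insert
 * walk the list until the first entry with location >= the target.
 */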
static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
{
	struct hclge_fd_rule *rule = NULL;
	struct hlist_node *node2;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
	}

	spin_unlock_bh(&hdev->fd_rule_lock);

	return rule && rule->location == location;
}

static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
				     struct hclge_fd_rule *new_rule,
				     u16 location,
				     bool is_add)
{
	struct hclge_fd_rule *rule = NULL, *parent = NULL;
	struct hlist_node *node2;

	if (is_add && !new_rule)
		return -EINVAL;

	hlist_for_each_entry_safe(rule, node2,
				  &hdev->fd_rule_list, rule_node) {
		if (rule->location >= location)
			break;
		parent = rule;
	}

	if (rule && rule->location == location) {
		hlist_del(&rule->rule_node);
		kfree(rule);
		hdev->hclge_fd_rule_num--;

		if (!is_add) {
			if (!hdev->hclge_fd_rule_num)
				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
			clear_bit(location, hdev->fd_bmap);

			return 0;
		}
	} else if (!is_add) {
		dev_err(&hdev->pdev->dev,
			"delete fail, rule %u does not exist\n",
			location);
		return -EINVAL;
	}

	INIT_HLIST_NODE(&new_rule->rule_node);

	if (parent)
		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);

	set_bit(location, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = new_rule->rule_type;

	return 0;
}

static int hclge_fd_get_tuple(struct hclge_dev *hdev,
			      struct ethtool_rx_flow_spec *fs,
			      struct hclge_fd_rule *rule)
{
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IP_USER_FLOW:
		rule->tuples.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
		rule->tuples_mask.src_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

		rule->tuples.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
		rule->tuples_mask.dst_ip[IPV4_INDEX] =
				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

		rule->tuples.ether_proto = ETH_P_IP;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
		rule->tuples_mask.src_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);

		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
		rule->tuples_mask.dst_port =
				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case IPV6_USER_FLOW:
		be32_to_cpu_array(rule->tuples.src_ip,
				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.src_ip,
				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);

		be32_to_cpu_array(rule->tuples.dst_ip,
				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
		be32_to_cpu_array(rule->tuples_mask.dst_ip,
				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);

		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;

		rule->tuples.ether_proto = ETH_P_IPV6;
		rule->tuples_mask.ether_proto = 0xFFFF;

		break;
	case ETHER_FLOW:
		ether_addr_copy(rule->tuples.src_mac,
				fs->h_u.ether_spec.h_source);
		ether_addr_copy(rule->tuples_mask.src_mac,
				fs->m_u.ether_spec.h_source);

		ether_addr_copy(rule->tuples.dst_mac,
				fs->h_u.ether_spec.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac,
				fs->m_u.ether_spec.h_dest);

		rule->tuples.ether_proto =
				be16_to_cpu(fs->h_u.ether_spec.h_proto);
		rule->tuples_mask.ether_proto =
				be16_to_cpu(fs->m_u.ether_spec.h_proto);

		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (flow_type) {
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_SCTP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_TCP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		rule->tuples.ip_proto = IPPROTO_UDP;
		rule->tuples_mask.ip_proto = 0xFF;
		break;
	default:
		break;
	}

	if (fs->flow_type & FLOW_EXT) {
		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
	}

	return 0;
}

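/* Add the rule to the software list and program its action and TCAM key
 * into hardware, rolling the list back if either step fails. Callers
 * serialize via hdev->fd_rule_lock.
 */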
static int hclge_fd_config_rule(struct hclge_dev *hdev,
				struct hclge_fd_rule *rule)
{
	int ret;

	if (!rule) {
		dev_err(&hdev->pdev->dev,
			"The flow director rule is NULL\n");
		return -EINVAL;
	}

	/* it never fails here, so there is no need to check the return value */
	hclge_fd_update_rule_list(hdev, rule, rule->location, true);

	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
	if (ret)
		goto clear_rule;

	return 0;

clear_rule:
	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
	return ret;
}

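/* Entry point for adding a flow director rule via ethtool
 * (ETHTOOL_SRXCLSRLINS). The ring_cookie selects either packet drop
 * (RX_CLS_FLOW_DISC) or a destination queue, optionally on a VF.
 */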
static int hclge_add_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 dst_vport_id = 0, q_index = 0;
	struct ethtool_rx_flow_spec *fs;
	struct hclge_fd_rule *rule;
	u32 unused = 0;
	u8 action;
	int ret;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"flow table director is not supported\n");
		return -EOPNOTSUPP;
	}

	if (!hdev->fd_en) {
		dev_err(&hdev->pdev->dev,
			"please enable flow director first\n");
		return -EOPNOTSUPP;
	}

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	ret = hclge_fd_check_spec(hdev, fs, &unused);
	if (ret)
		return ret;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		action = HCLGE_FD_ACTION_DROP_PACKET;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
		u16 tqps;

		if (vf > hdev->num_req_vfs) {
			dev_err(&hdev->pdev->dev,
				"Error: vf id (%u) > max vf num (%u)\n",
				vf, hdev->num_req_vfs);
			return -EINVAL;
		}

		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;

		if (ring >= tqps) {
			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max tqp num (%u)\n",
				ring, tqps - 1);
			return -EINVAL;
		}

		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
		q_index = ring;
	}

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = hclge_fd_get_tuple(hdev, fs, rule);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->flow_type = fs->flow_type;
	rule->location = fs->location;
	rule->unused_tuple = unused;
	rule->vf_id = dst_vport_id;
	rule->queue_id = q_index;
	rule->action = action;
	rule->rule_type = HCLGE_FD_EP_ACTIVE;

	/* To avoid rule conflict, when the user configures a rule via
	 * ethtool, we need to clear all arfs rules.
	 */
	spin_lock_bh(&hdev->fd_rule_lock);
	hclge_clear_arfs_rules(handle);

	ret = hclge_fd_config_rule(hdev, rule);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static int hclge_del_fd_entry(struct hnae3_handle *handle,
			      struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct ethtool_rx_flow_spec *fs;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		return -EINVAL;

	if (!hclge_fd_rule_exist(hdev, fs->location)) {
		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
		return -ENOENT;
	}

	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
				   NULL, false);
	if (ret)
		return ret;

	spin_lock_bh(&hdev->fd_rule_lock);
	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);

	spin_unlock_bh(&hdev->fd_rule_lock);

	return ret;
}

static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
				     bool clear_list)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	u16 location;

	if (!hnae3_dev_fd_supported(hdev))
		return;

	for_each_set_bit(location, hdev->fd_bmap,
			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
				     NULL, false);

	if (clear_list) {
		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
					  rule_node) {
			hlist_del(&rule->rule_node);
			kfree(rule);
		}
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
		hdev->hclge_fd_rule_num = 0;
		bitmap_zero(hdev->fd_bmap,
			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
	}
}

static int hclge_restore_fd_entries(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_fd_rule *rule;
	struct hlist_node *node;
	int ret;

	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	/* if fd is disabled, it should not be restored on reset */
	if (!hdev->fd_en)
		return 0;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
		if (!ret)
			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);

		if (ret) {
			dev_warn(&hdev->pdev->dev,
				 "Restore rule %u failed, remove it\n",
				 rule->location);
			clear_bit(rule->location, hdev->fd_bmap);
			hlist_del(&rule->rule_node);
			kfree(rule);
			hdev->hclge_fd_rule_num--;
		}
	}

	if (hdev->hclge_fd_rule_num)
		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;

	spin_unlock_bh(&hdev->fd_rule_lock);

	return 0;
}

static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (!hnae3_dev_fd_supported(hdev))
		return -EOPNOTSUPP;

	cmd->rule_cnt = hdev->hclge_fd_rule_num;
	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];

	return 0;
}

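/* The hclge_fd_get_*_info() helpers below are the inverse of the tuple
 * parsing above: they rebuild an ethtool flow spec from a stored rule,
 * reporting an all-zero mask for any tuple marked unused.
 */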
static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip4_spec *spec,
				     struct ethtool_tcpip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;
}

static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip4_spec *spec,
				  struct ethtool_usrip4_spec *spec_mask)
{
	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);

	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);

	spec->tos = rule->tuples.ip_tos;
	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
			0 : rule->tuples_mask.ip_tos;

	spec->proto = rule->tuples.ip_proto;
	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;

	spec->ip_ver = ETH_RX_NFC_IP4;
}

static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
				     struct ethtool_tcpip6_spec *spec,
				     struct ethtool_tcpip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src,
			  rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst,
			  rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
				  IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
				  IPV6_SIZE);

	spec->psrc = cpu_to_be16(rule->tuples.src_port);
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.src_port);

	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : cpu_to_be16(rule->tuples_mask.dst_port);
}

static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
				  struct ethtool_usrip6_spec *spec,
				  struct ethtool_usrip6_spec *spec_mask)
{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
	if (rule->unused_tuple & BIT(INNER_SRC_IP))
		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
	else
		cpu_to_be32_array(spec_mask->ip6src,
				  rule->tuples_mask.src_ip, IPV6_SIZE);

	if (rule->unused_tuple & BIT(INNER_DST_IP))
		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
	else
		cpu_to_be32_array(spec_mask->ip6dst,
				  rule->tuples_mask.dst_ip, IPV6_SIZE);

	spec->l4_proto = rule->tuples.ip_proto;
	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
			0 : rule->tuples_mask.ip_proto;
}

static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
				    struct ethhdr *spec,
				    struct ethhdr *spec_mask)
{
	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);

	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
		eth_zero_addr(spec_mask->h_source);
	else
		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);

	if (rule->unused_tuple & BIT(INNER_DST_MAC))
		eth_zero_addr(spec_mask->h_dest);
	else
		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);

	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
}

6064static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6065 struct hclge_fd_rule *rule)
6066{
6067 if (fs->flow_type & FLOW_EXT) {
6068 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6069 fs->m_ext.vlan_tci =
6070 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6071 cpu_to_be16(VLAN_VID_MASK) :
6072 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6073 }
6074
6075 if (fs->flow_type & FLOW_MAC_EXT) {
6076 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
6082 }
6083}
6084
6085static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6086 struct ethtool_rxnfc *cmd)
6087{
6088 struct hclge_vport *vport = hclge_get_vport(handle);
6089 struct hclge_fd_rule *rule = NULL;
6090 struct hclge_dev *hdev = vport->back;
6091 struct ethtool_rx_flow_spec *fs;
6092 struct hlist_node *node2;
6093
6094 if (!hnae3_dev_fd_supported(hdev))
6095 return -EOPNOTSUPP;
6096
6097 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6098
6099 spin_lock_bh(&hdev->fd_rule_lock);
6100
6101 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6102 if (rule->location >= fs->location)
6103 break;
6104 }
6105
6106 if (!rule || fs->location != rule->location) {
6107 spin_unlock_bh(&hdev->fd_rule_lock);
6108
6109 return -ENOENT;
6110 }
6111
6112 fs->flow_type = rule->flow_type;
6113 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6114 case SCTP_V4_FLOW:
6115 case TCP_V4_FLOW:
6116 case UDP_V4_FLOW:
6117 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6118 &fs->m_u.tcp_ip4_spec);
6119 break;
6120 case IP_USER_FLOW:
6121 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6122 &fs->m_u.usr_ip4_spec);
6123 break;
6124 case SCTP_V6_FLOW:
6125 case TCP_V6_FLOW:
6126 case UDP_V6_FLOW:
6127 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6128 &fs->m_u.tcp_ip6_spec);
6129 break;
6130 case IPV6_USER_FLOW:
6131 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6132 &fs->m_u.usr_ip6_spec);
6133 break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. As all other flow types have been handled above,
	 * it must be ETHER_FLOW for ether type rules.
	 */
6138 default:
6139 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6140 &fs->m_u.ether_spec);
6141 break;
6142 }
6143
6144 hclge_fd_get_ext_info(fs, rule);
6145
6146 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6147 fs->ring_cookie = RX_CLS_FLOW_DISC;
6148 } else {
6149 u64 vf_id;
6150
6151 fs->ring_cookie = rule->queue_id;
6152 vf_id = rule->vf_id;
6153 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6154 fs->ring_cookie |= vf_id;
6155 }
6156
6157 spin_unlock_bh(&hdev->fd_rule_lock);
6158
6159 return 0;
6160}
6161
6162static int hclge_get_all_rules(struct hnae3_handle *handle,
6163 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6164{
6165 struct hclge_vport *vport = hclge_get_vport(handle);
6166 struct hclge_dev *hdev = vport->back;
6167 struct hclge_fd_rule *rule;
6168 struct hlist_node *node2;
6169 int cnt = 0;
6170
6171 if (!hnae3_dev_fd_supported(hdev))
6172 return -EOPNOTSUPP;
6173
6174 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6175
6176 spin_lock_bh(&hdev->fd_rule_lock);
6177 hlist_for_each_entry_safe(rule, node2,
6178 &hdev->fd_rule_list, rule_node) {
6179 if (cnt == cmd->rule_cnt) {
6180 spin_unlock_bh(&hdev->fd_rule_lock);
6181 return -EMSGSIZE;
6182 }
6183
6184 rule_locs[cnt] = rule->location;
6185 cnt++;
6186 }
6187
6188 spin_unlock_bh(&hdev->fd_rule_lock);
6189
6190 cmd->rule_cnt = cnt;
6191
6192 return 0;
6193}
6194
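/* extract the flow tuples used by aRFS from the dissected flow keys;
 * IPv4 addresses are stored in the last word of the IPv6-sized array
 */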
6195static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6196 struct hclge_fd_rule_tuples *tuples)
6197{
6198#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6199#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6200
6201 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6202 tuples->ip_proto = fkeys->basic.ip_proto;
6203 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6204
6205 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6206 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6207 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6208 } else {
6209 int i;
6210
6211 for (i = 0; i < IPV6_SIZE; i++) {
6212 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6213 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6214 }
6215 }
6216}
6217
/* traverse all rules, check whether an existing rule has the same tuples */
6219static struct hclge_fd_rule *
6220hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6221 const struct hclge_fd_rule_tuples *tuples)
6222{
6223 struct hclge_fd_rule *rule = NULL;
6224 struct hlist_node *node;
6225
6226 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6227 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6228 return rule;
6229 }
6230
6231 return NULL;
6232}
6233
6234static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6235 struct hclge_fd_rule *rule)
6236{
6237 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6238 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6239 BIT(INNER_SRC_PORT);
6240 rule->action = 0;
6241 rule->vf_id = 0;
6242 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6243 if (tuples->ether_proto == ETH_P_IP) {
6244 if (tuples->ip_proto == IPPROTO_TCP)
6245 rule->flow_type = TCP_V4_FLOW;
6246 else
6247 rule->flow_type = UDP_V4_FLOW;
6248 } else {
6249 if (tuples->ip_proto == IPPROTO_TCP)
6250 rule->flow_type = TCP_V6_FLOW;
6251 else
6252 rule->flow_type = UDP_V6_FLOW;
6253 }
6254 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6255 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6256}
6257
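/* aRFS callback: steer the flow described by @fkeys to @queue_id, reusing
 * a matching rule when one exists; returns the rule location on success
 */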
6258static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6259 u16 flow_id, struct flow_keys *fkeys)
6260{
6261 struct hclge_vport *vport = hclge_get_vport(handle);
6262 struct hclge_fd_rule_tuples new_tuples = {};
6263 struct hclge_dev *hdev = vport->back;
6264 struct hclge_fd_rule *rule;
6265 u16 tmp_queue_id;
6266 u16 bit_id;
6267 int ret;
6268
6269 if (!hnae3_dev_fd_supported(hdev))
6270 return -EOPNOTSUPP;
6271
	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
6275 spin_lock_bh(&hdev->fd_rule_lock);
6276 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6277 spin_unlock_bh(&hdev->fd_rule_lock);
6278 return -EOPNOTSUPP;
6279 }
6280
6281 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6282
	/* check whether a rule with the same tuples already exists; if so,
	 * reuse it directly instead of creating a new one
	 */
6288 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6289 if (!rule) {
6290 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6291 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6292 spin_unlock_bh(&hdev->fd_rule_lock);
6293 return -ENOSPC;
6294 }
6295
6296 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6297 if (!rule) {
6298 spin_unlock_bh(&hdev->fd_rule_lock);
6299 return -ENOMEM;
6300 }
6301
6302 set_bit(bit_id, hdev->fd_bmap);
6303 rule->location = bit_id;
6304 rule->flow_id = flow_id;
6305 rule->queue_id = queue_id;
6306 hclge_fd_build_arfs_rule(&new_tuples, rule);
6307 ret = hclge_fd_config_rule(hdev, rule);
6308
6309 spin_unlock_bh(&hdev->fd_rule_lock);
6310
6311 if (ret)
6312 return ret;
6313
6314 return rule->location;
6315 }
6316
6317 spin_unlock_bh(&hdev->fd_rule_lock);
6318
6319 if (rule->queue_id == queue_id)
6320 return rule->location;
6321
6322 tmp_queue_id = rule->queue_id;
6323 rule->queue_id = queue_id;
6324 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6325 if (ret) {
6326 rule->queue_id = tmp_queue_id;
6327 return ret;
6328 }
6329
6330 return rule->location;
6331}
6332
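/* move expired aRFS rules to a private list under fd_rule_lock, then
 * remove them from hardware outside the lock
 */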
6333static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6334{
6335#ifdef CONFIG_RFS_ACCEL
6336 struct hnae3_handle *handle = &hdev->vport[0].nic;
6337 struct hclge_fd_rule *rule;
6338 struct hlist_node *node;
6339 HLIST_HEAD(del_list);
6340
6341 spin_lock_bh(&hdev->fd_rule_lock);
6342 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6343 spin_unlock_bh(&hdev->fd_rule_lock);
6344 return;
6345 }
6346 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6347 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6348 rule->flow_id, rule->location)) {
6349 hlist_del_init(&rule->rule_node);
6350 hlist_add_head(&rule->rule_node, &del_list);
6351 hdev->hclge_fd_rule_num--;
6352 clear_bit(rule->location, hdev->fd_bmap);
6353 }
6354 }
6355 spin_unlock_bh(&hdev->fd_rule_lock);
6356
6357 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6358 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6359 rule->location, NULL, false);
6360 kfree(rule);
6361 }
6362#endif
6363}
6364
/* caller must hold fd_rule_lock */
6366static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6367{
6368#ifdef CONFIG_RFS_ACCEL
6369 struct hclge_vport *vport = hclge_get_vport(handle);
6370 struct hclge_dev *hdev = vport->back;
6371
6372 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6373 hclge_del_all_fd_entries(handle, true);
6374#endif
6375}
6376
6377static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6378{
6379 struct hclge_vport *vport = hclge_get_vport(handle);
6380 struct hclge_dev *hdev = vport->back;
6381
6382 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6383 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6384}
6385
6386static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6387{
6388 struct hclge_vport *vport = hclge_get_vport(handle);
6389 struct hclge_dev *hdev = vport->back;
6390
6391 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6392}
6393
6394static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6395{
6396 struct hclge_vport *vport = hclge_get_vport(handle);
6397 struct hclge_dev *hdev = vport->back;
6398
6399 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6400}
6401
6402static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6403{
6404 struct hclge_vport *vport = hclge_get_vport(handle);
6405 struct hclge_dev *hdev = vport->back;
6406
6407 return hdev->rst_stats.hw_reset_done_cnt;
6408}
6409
6410static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6411{
6412 struct hclge_vport *vport = hclge_get_vport(handle);
6413 struct hclge_dev *hdev = vport->back;
6414 bool clear;
6415
6416 hdev->fd_en = enable;
6417 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6418
6419 if (!enable) {
6420 spin_lock_bh(&hdev->fd_rule_lock);
6421 hclge_del_all_fd_entries(handle, clear);
6422 spin_unlock_bh(&hdev->fd_rule_lock);
6423 } else {
6424 hclge_restore_fd_entries(handle);
6425 }
6426}
6427
6428static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6429{
6430 struct hclge_desc desc;
6431 struct hclge_config_mac_mode_cmd *req =
6432 (struct hclge_config_mac_mode_cmd *)desc.data;
6433 u32 loop_en = 0;
6434 int ret;
6435
6436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6437
6438 if (enable) {
6439 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6440 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6441 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6442 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6443 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6444 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6445 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6446 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6447 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6448 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6449 }
6450
6451 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6452
6453 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6454 if (ret)
6455 dev_err(&hdev->pdev->dev,
6456 "mac enable fail, ret =%d.\n", ret);
6457}
6458
6459static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6460 u8 switch_param, u8 param_mask)
6461{
6462 struct hclge_mac_vlan_switch_cmd *req;
6463 struct hclge_desc desc;
6464 u32 func_id;
6465 int ret;
6466
6467 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6468 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6469
	/* read current config parameter */
6471 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6472 true);
6473 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6474 req->func_id = cpu_to_le32(func_id);
6475
6476 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6477 if (ret) {
6478 dev_err(&hdev->pdev->dev,
6479 "read mac vlan switch parameter fail, ret = %d\n", ret);
6480 return ret;
6481 }
6482
	/* modify and write new config parameter */
6484 hclge_cmd_reuse_desc(&desc, false);
6485 req->switch_param = (req->switch_param & param_mask) | switch_param;
6486 req->param_mask = param_mask;
6487
6488 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6489 if (ret)
6490 dev_err(&hdev->pdev->dev,
6491 "set mac vlan switch parameter fail, ret = %d\n", ret);
6492 return ret;
6493}
6494
6495static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6496 int link_ret)
6497{
6498#define HCLGE_PHY_LINK_STATUS_NUM 200
6499
6500 struct phy_device *phydev = hdev->hw.mac.phydev;
6501 int i = 0;
6502 int ret;
6503
6504 do {
6505 ret = phy_read_status(phydev);
6506 if (ret) {
6507 dev_err(&hdev->pdev->dev,
6508 "phy update link status fail, ret = %d\n", ret);
6509 return;
6510 }
6511
6512 if (phydev->link == link_ret)
6513 break;
6514
6515 msleep(HCLGE_LINK_STATUS_MS);
6516 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6517}
6518
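/* poll the MAC link status until it matches @link_ret, or return -EBUSY
 * after HCLGE_MAC_LINK_STATUS_NUM retries
 */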
6519static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6520{
6521#define HCLGE_MAC_LINK_STATUS_NUM 100
6522
6523 int link_status;
6524 int i = 0;
6525 int ret;
6526
6527 do {
6528 ret = hclge_get_mac_link_status(hdev, &link_status);
6529 if (ret)
6530 return ret;
6531 if (link_status == link_ret)
6532 return 0;
6533
6534 msleep(HCLGE_LINK_STATUS_MS);
6535 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6536 return -EBUSY;
6537}
6538
6539static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6540 bool is_phy)
6541{
6542 int link_ret;
6543
6544 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6545
6546 if (is_phy)
6547 hclge_phy_link_status_wait(hdev, link_ret);
6548
6549 return hclge_mac_link_status_wait(hdev, link_ret);
6550}
6551
6552static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6553{
6554 struct hclge_config_mac_mode_cmd *req;
6555 struct hclge_desc desc;
6556 u32 loop_en;
6557 int ret;
6558
6559 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];

	/* 1 Read out the MAC mode config at first */
6561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6562 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6563 if (ret) {
6564 dev_err(&hdev->pdev->dev,
6565 "mac loopback get fail, ret =%d.\n", ret);
6566 return ret;
6567 }
6568
	/* 2 Then setup the loopback flag */
6570 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6571 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6572
6573 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6574
	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
6578 hclge_cmd_reuse_desc(&desc, false);
6579 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6580 if (ret)
6581 dev_err(&hdev->pdev->dev,
6582 "mac loopback set fail, ret =%d.\n", ret);
6583 return ret;
6584}
6585
6586static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6587 enum hnae3_loop loop_mode)
6588{
6589#define HCLGE_SERDES_RETRY_MS 10
6590#define HCLGE_SERDES_RETRY_NUM 100
6591
6592 struct hclge_serdes_lb_cmd *req;
6593 struct hclge_desc desc;
6594 int ret, i = 0;
6595 u8 loop_mode_b;
6596
6597 req = (struct hclge_serdes_lb_cmd *)desc.data;
6598 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6599
6600 switch (loop_mode) {
6601 case HNAE3_LOOP_SERIAL_SERDES:
6602 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6603 break;
6604 case HNAE3_LOOP_PARALLEL_SERDES:
6605 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6606 break;
6607 default:
6608 dev_err(&hdev->pdev->dev,
6609 "unsupported serdes loopback mode %d\n", loop_mode);
		return -EOPNOTSUPP;
6611 }
6612
6613 if (en) {
6614 req->enable = loop_mode_b;
6615 req->mask = loop_mode_b;
6616 } else {
6617 req->mask = loop_mode_b;
6618 }
6619
6620 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6621 if (ret) {
6622 dev_err(&hdev->pdev->dev,
6623 "serdes loopback set fail, ret = %d\n", ret);
6624 return ret;
6625 }
6626
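	/* wait for the firmware to finish the serdes loopback configuration */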
6627 do {
6628 msleep(HCLGE_SERDES_RETRY_MS);
6629 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6630 true);
6631 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6632 if (ret) {
6633 dev_err(&hdev->pdev->dev,
6634 "serdes loopback get, ret = %d\n", ret);
6635 return ret;
6636 }
6637 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6638 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6639
6640 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6641 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6642 return -EBUSY;
6643 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6644 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6645 return -EIO;
6646 }
6647 return ret;
6648}
6649
6650static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6651 enum hnae3_loop loop_mode)
6652{
6653 int ret;
6654
6655 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6656 if (ret)
6657 return ret;
6658
6659 hclge_cfg_mac_mode(hdev, en);
6660
6661 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6662 if (ret)
6663 dev_err(&hdev->pdev->dev,
6664 "serdes loopback config mac mode timeout\n");
6665
6666 return ret;
6667}
6668
6669static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6670 struct phy_device *phydev)
6671{
6672 int ret;
6673
6674 if (!phydev->suspended) {
6675 ret = phy_suspend(phydev);
6676 if (ret)
6677 return ret;
6678 }
6679
6680 ret = phy_resume(phydev);
6681 if (ret)
6682 return ret;
6683
6684 return phy_loopback(phydev, true);
6685}
6686
6687static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6688 struct phy_device *phydev)
6689{
6690 int ret;
6691
6692 ret = phy_loopback(phydev, false);
6693 if (ret)
6694 return ret;
6695
6696 return phy_suspend(phydev);
6697}
6698
6699static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6700{
6701 struct phy_device *phydev = hdev->hw.mac.phydev;
6702 int ret;
6703
6704 if (!phydev)
		return -EOPNOTSUPP;
6706
6707 if (en)
6708 ret = hclge_enable_phy_loopback(hdev, phydev);
6709 else
6710 ret = hclge_disable_phy_loopback(hdev, phydev);
6711 if (ret) {
6712 dev_err(&hdev->pdev->dev,
6713 "set phy loopback fail, ret = %d\n", ret);
6714 return ret;
6715 }
6716
6717 hclge_cfg_mac_mode(hdev, en);
6718
6719 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6720 if (ret)
6721 dev_err(&hdev->pdev->dev,
6722 "phy loopback config mac mode timeout\n");
6723
6724 return ret;
6725}
6726
6727static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6728 int stream_id, bool enable)
6729{
6730 struct hclge_desc desc;
6731 struct hclge_cfg_com_tqp_queue_cmd *req =
6732 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6733 int ret;
6734
6735 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6736 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6737 req->stream_id = cpu_to_le16(stream_id);
6738 if (enable)
6739 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6740
6741 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6742 if (ret)
6743 dev_err(&hdev->pdev->dev,
6744 "Tqp enable fail, status =%d.\n", ret);
6745 return ret;
6746}
6747
6748static int hclge_set_loopback(struct hnae3_handle *handle,
6749 enum hnae3_loop loop_mode, bool en)
6750{
6751 struct hclge_vport *vport = hclge_get_vport(handle);
6752 struct hnae3_knic_private_info *kinfo;
6753 struct hclge_dev *hdev = vport->back;
6754 int i, ret;
6755
	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
6761 if (hdev->pdev->revision >= 0x21) {
6762 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6763
6764 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6765 HCLGE_SWITCH_ALW_LPBK_MASK);
6766 if (ret)
6767 return ret;
6768 }
6769
6770 switch (loop_mode) {
6771 case HNAE3_LOOP_APP:
6772 ret = hclge_set_app_loopback(hdev, en);
6773 break;
6774 case HNAE3_LOOP_SERIAL_SERDES:
6775 case HNAE3_LOOP_PARALLEL_SERDES:
6776 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6777 break;
6778 case HNAE3_LOOP_PHY:
6779 ret = hclge_set_phy_loopback(hdev, en);
6780 break;
6781 default:
		ret = -EOPNOTSUPP;
6783 dev_err(&hdev->pdev->dev,
6784 "loop_mode %d is not supported\n", loop_mode);
6785 break;
6786 }
6787
6788 if (ret)
6789 return ret;
6790
6791 kinfo = &vport->nic.kinfo;
6792 for (i = 0; i < kinfo->num_tqps; i++) {
6793 ret = hclge_tqp_enable(hdev, i, 0, en);
6794 if (ret)
6795 return ret;
6796 }
6797
6798 return 0;
6799}
6800
6801static int hclge_set_default_loopback(struct hclge_dev *hdev)
6802{
6803 int ret;
6804
6805 ret = hclge_set_app_loopback(hdev, false);
6806 if (ret)
6807 return ret;
6808
6809 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6810 if (ret)
6811 return ret;
6812
6813 return hclge_cfg_serdes_loopback(hdev, false,
6814 HNAE3_LOOP_PARALLEL_SERDES);
6815}
6816
6817static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6818{
6819 struct hclge_vport *vport = hclge_get_vport(handle);
6820 struct hnae3_knic_private_info *kinfo;
6821 struct hnae3_queue *queue;
6822 struct hclge_tqp *tqp;
6823 int i;
6824
6825 kinfo = &vport->nic.kinfo;
6826 for (i = 0; i < kinfo->num_tqps; i++) {
6827 queue = handle->kinfo.tqp[i];
6828 tqp = container_of(queue, struct hclge_tqp, q);
6829 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6830 }
6831}
6832
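/* wait until any in-flight link status update in the service task has
 * finished, so that the DOWN flag is honoured afterwards
 */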
6833static void hclge_flush_link_update(struct hclge_dev *hdev)
6834{
6835#define HCLGE_FLUSH_LINK_TIMEOUT 100000
6836
6837 unsigned long last = hdev->serv_processed_cnt;
6838 int i = 0;
6839
6840 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6841 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6842 last == hdev->serv_processed_cnt)
6843 usleep_range(1, 1);
6844}
6845
6846static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6847{
6848 struct hclge_vport *vport = hclge_get_vport(handle);
6849 struct hclge_dev *hdev = vport->back;
6850
6851 if (enable) {
6852 hclge_task_schedule(hdev, 0);
6853 } else {
		/* Set the DOWN flag here to disable link updating */
6855 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6856
		/* flush memory to make sure DOWN is seen by service task */
6858 smp_mb__before_atomic();
6859 hclge_flush_link_update(hdev);
6860 }
6861}
6862
6863static int hclge_ae_start(struct hnae3_handle *handle)
6864{
6865 struct hclge_vport *vport = hclge_get_vport(handle);
6866 struct hclge_dev *hdev = vport->back;
6867
	/* mac enable */
6869 hclge_cfg_mac_mode(hdev, true);
6870 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6871 hdev->hw.mac.link = 0;
6872
	/* reset tqp stats */
6874 hclge_reset_tqp_stats(handle);
6875
6876 hclge_mac_start_phy(hdev);
6877
6878 return 0;
6879}
6880
6881static void hclge_ae_stop(struct hnae3_handle *handle)
6882{
6883 struct hclge_vport *vport = hclge_get_vport(handle);
6884 struct hclge_dev *hdev = vport->back;
6885 int i;
6886
6887 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6888 spin_lock_bh(&hdev->fd_rule_lock);
6889 hclge_clear_arfs_rules(handle);
6890 spin_unlock_bh(&hdev->fd_rule_lock);
6891
	/* If it is not PF reset, the firmware will disable the MAC,
	 * so it only needs to stop phy here.
	 */
6895 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6896 hdev->reset_type != HNAE3_FUNC_RESET) {
6897 hclge_mac_stop_phy(hdev);
6898 hclge_update_link_status(hdev);
6899 return;
6900 }
6901
6902 for (i = 0; i < handle->kinfo.num_tqps; i++)
6903 hclge_reset_tqp(handle, i);
6904
6905 hclge_config_mac_tnl_int(hdev, false);
6906
	/* Mac disable */
6908 hclge_cfg_mac_mode(hdev, false);
6909
6910 hclge_mac_stop_phy(hdev);
6911
	/* reset tqp stats */
6913 hclge_reset_tqp_stats(handle);
6914 hclge_update_link_status(hdev);
6915}
6916
6917int hclge_vport_start(struct hclge_vport *vport)
6918{
6919 struct hclge_dev *hdev = vport->back;
6920
6921 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6922 vport->last_active_jiffies = jiffies;
6923
6924 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6925 if (vport->vport_id) {
6926 hclge_restore_mac_table_common(vport);
6927 hclge_restore_vport_vlan_table(vport);
6928 } else {
6929 hclge_restore_hw_table(hdev);
6930 }
6931 }
6932
6933 clear_bit(vport->vport_id, hdev->vport_config_block);
6934
6935 return 0;
6936}
6937
6938void hclge_vport_stop(struct hclge_vport *vport)
6939{
6940 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6941}
6942
6943static int hclge_client_start(struct hnae3_handle *handle)
6944{
6945 struct hclge_vport *vport = hclge_get_vport(handle);
6946
6947 return hclge_vport_start(vport);
6948}
6949
6950static void hclge_client_stop(struct hnae3_handle *handle)
6951{
6952 struct hclge_vport *vport = hclge_get_vport(handle);
6953
6954 hclge_vport_stop(vport);
6955}
6956
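/* translate the firmware completion code of a MAC VLAN table command
 * into a standard errno, depending on the table operation @op
 */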
6957static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6958 u16 cmdq_resp, u8 resp_code,
6959 enum hclge_mac_vlan_tbl_opcode op)
6960{
6961 struct hclge_dev *hdev = vport->back;
6962
6963 if (cmdq_resp) {
6964 dev_err(&hdev->pdev->dev,
6965 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6966 cmdq_resp);
6967 return -EIO;
6968 }
6969
6970 if (op == HCLGE_MAC_VLAN_ADD) {
6971 if (!resp_code || resp_code == 1)
6972 return 0;
6973 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6974 resp_code == HCLGE_ADD_MC_OVERFLOW)
6975 return -ENOSPC;
6976
6977 dev_err(&hdev->pdev->dev,
6978 "add mac addr failed for undefined, code=%u.\n",
6979 resp_code);
6980 return -EIO;
6981 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6982 if (!resp_code) {
6983 return 0;
6984 } else if (resp_code == 1) {
6985 dev_dbg(&hdev->pdev->dev,
6986 "remove mac addr failed for miss.\n");
6987 return -ENOENT;
6988 }
6989
6990 dev_err(&hdev->pdev->dev,
6991 "remove mac addr failed for undefined, code=%u.\n",
6992 resp_code);
6993 return -EIO;
6994 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6995 if (!resp_code) {
6996 return 0;
6997 } else if (resp_code == 1) {
6998 dev_dbg(&hdev->pdev->dev,
6999 "lookup mac addr failed for miss.\n");
7000 return -ENOENT;
7001 }
7002
7003 dev_err(&hdev->pdev->dev,
7004 "lookup mac addr failed for undefined, code=%u.\n",
7005 resp_code);
7006 return -EIO;
7007 }
7008
7009 dev_err(&hdev->pdev->dev,
7010 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7011
7012 return -EINVAL;
7013}
7014
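/* set or clear the bit of @vfid in the function id bitmap spread over
 * desc[1] and desc[2]; the first 192 functions live in desc[1]
 */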
7015static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7016{
7017#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7018
7019 unsigned int word_num;
7020 unsigned int bit_num;
7021
	if (vfid > 255 || vfid < 0)
		return -EIO;

	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7026 word_num = vfid / 32;
7027 bit_num = vfid % 32;
7028 if (clr)
7029 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7030 else
7031 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7032 } else {
7033 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7034 bit_num = vfid % 32;
7035 if (clr)
7036 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7037 else
7038 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7039 }
7040
7041 return 0;
7042}
7043
7044static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7045{
7046#define HCLGE_DESC_NUMBER 3
7047#define HCLGE_FUNC_NUMBER_PER_DESC 6
7048 int i, j;
7049
7050 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7051 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7052 if (desc[i].data[j])
7053 return false;
7054
7055 return true;
7056}
7057
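/* pack the 6-byte MAC address into the hi32/lo16 little-endian fields of
 * a MAC VLAN table entry, and flag the entry as multicast if necessary
 */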
7058static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7059 const u8 *addr, bool is_mc)
7060{
7061 const unsigned char *mac_addr = addr;
7062 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7063 (mac_addr[0]) | (mac_addr[1] << 8);
7064 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
7065
7066 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7067 if (is_mc) {
7068 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7069 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7070 }
7071
7072 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7073 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7074}
7075
7076static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7077 struct hclge_mac_vlan_tbl_entry_cmd *req)
7078{
7079 struct hclge_dev *hdev = vport->back;
7080 struct hclge_desc desc;
7081 u8 resp_code;
7082 u16 retval;
7083 int ret;
7084
7085 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7086
7087 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7088
7089 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7090 if (ret) {
7091 dev_err(&hdev->pdev->dev,
7092 "del mac addr failed for cmd_send, ret =%d.\n",
7093 ret);
7094 return ret;
7095 }
7096 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7097 retval = le16_to_cpu(desc.retval);
7098
7099 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7100 HCLGE_MAC_VLAN_REMOVE);
7101}
7102
7103static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7104 struct hclge_mac_vlan_tbl_entry_cmd *req,
7105 struct hclge_desc *desc,
7106 bool is_mc)
7107{
7108 struct hclge_dev *hdev = vport->back;
7109 u8 resp_code;
7110 u16 retval;
7111 int ret;
7112
7113 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7114 if (is_mc) {
7115 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7116 memcpy(desc[0].data,
7117 req,
7118 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7119 hclge_cmd_setup_basic_desc(&desc[1],
7120 HCLGE_OPC_MAC_VLAN_ADD,
7121 true);
7122 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7123 hclge_cmd_setup_basic_desc(&desc[2],
7124 HCLGE_OPC_MAC_VLAN_ADD,
7125 true);
7126 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7127 } else {
7128 memcpy(desc[0].data,
7129 req,
7130 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7131 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7132 }
7133 if (ret) {
7134 dev_err(&hdev->pdev->dev,
7135 "lookup mac addr failed for cmd_send, ret =%d.\n",
7136 ret);
7137 return ret;
7138 }
7139 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7140 retval = le16_to_cpu(desc[0].retval);
7141
7142 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7143 HCLGE_MAC_VLAN_LKUP);
7144}
7145
7146static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7147 struct hclge_mac_vlan_tbl_entry_cmd *req,
7148 struct hclge_desc *mc_desc)
7149{
7150 struct hclge_dev *hdev = vport->back;
7151 int cfg_status;
7152 u8 resp_code;
7153 u16 retval;
7154 int ret;
7155
7156 if (!mc_desc) {
7157 struct hclge_desc desc;
7158
7159 hclge_cmd_setup_basic_desc(&desc,
7160 HCLGE_OPC_MAC_VLAN_ADD,
7161 false);
7162 memcpy(desc.data, req,
7163 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7164 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7166 retval = le16_to_cpu(desc.retval);
7167
7168 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7169 resp_code,
7170 HCLGE_MAC_VLAN_ADD);
7171 } else {
7172 hclge_cmd_reuse_desc(&mc_desc[0], false);
7173 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7174 hclge_cmd_reuse_desc(&mc_desc[1], false);
7175 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7176 hclge_cmd_reuse_desc(&mc_desc[2], false);
7177 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7178 memcpy(mc_desc[0].data, req,
7179 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7180 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7181 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7182 retval = le16_to_cpu(mc_desc[0].retval);
7183
7184 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7185 resp_code,
7186 HCLGE_MAC_VLAN_ADD);
7187 }
7188
7189 if (ret) {
7190 dev_err(&hdev->pdev->dev,
7191 "add mac addr failed for cmd_send, ret =%d.\n",
7192 ret);
7193 return ret;
7194 }
7195
7196 return cfg_status;
7197}
7198
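/* request @space_size unicast MAC entries from the firmware; the size
 * actually granted is returned through @allocated_size
 */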
7199static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7200 u16 *allocated_size)
7201{
7202 struct hclge_umv_spc_alc_cmd *req;
7203 struct hclge_desc desc;
7204 int ret;
7205
7206 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7207 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7208
7209 req->space_size = cpu_to_le32(space_size);
7210
7211 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7212 if (ret) {
7213 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7214 ret);
7215 return ret;
7216 }
7217
7218 *allocated_size = le32_to_cpu(desc.data[1]);
7219
7220 return 0;
7221}
7222
7223static int hclge_init_umv_space(struct hclge_dev *hdev)
7224{
7225 u16 allocated_size = 0;
7226 int ret;
7227
7228 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7229 if (ret)
7230 return ret;
7231
7232 if (allocated_size < hdev->wanted_umv_size)
7233 dev_warn(&hdev->pdev->dev,
7234 "failed to alloc umv space, want %u, get %u\n",
7235 hdev->wanted_umv_size, allocated_size);
7236
7237 hdev->max_umv_size = allocated_size;
7238 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7239 hdev->share_umv_size = hdev->priv_umv_size +
7240 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7241
7242 return 0;
7243}
7244
7245static void hclge_reset_umv_space(struct hclge_dev *hdev)
7246{
7247 struct hclge_vport *vport;
7248 int i;
7249
7250 for (i = 0; i < hdev->num_alloc_vport; i++) {
7251 vport = &hdev->vport[i];
7252 vport->used_umv_num = 0;
7253 }
7254
7255 mutex_lock(&hdev->vport_lock);
7256 hdev->share_umv_size = hdev->priv_umv_size +
7257 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7258 mutex_unlock(&hdev->vport_lock);
7259}
7260
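/* the UMV space is full when the vport has used up its private quota and
 * no shared entries are left
 */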
7261static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7262{
7263 struct hclge_dev *hdev = vport->back;
7264 bool is_full;
7265
7266 if (need_lock)
7267 mutex_lock(&hdev->vport_lock);
7268
7269 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7270 hdev->share_umv_size == 0);
7271
7272 if (need_lock)
7273 mutex_unlock(&hdev->vport_lock);
7274
7275 return is_full;
7276}
7277
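/* update the private/shared UMV usage counters; caller must hold
 * hdev->vport_lock
 */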
7278static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7279{
7280 struct hclge_dev *hdev = vport->back;
7281
7282 if (is_free) {
7283 if (vport->used_umv_num > hdev->priv_umv_size)
7284 hdev->share_umv_size++;
7285
7286 if (vport->used_umv_num > 0)
7287 vport->used_umv_num--;
7288 } else {
7289 if (vport->used_umv_num >= hdev->priv_umv_size &&
7290 hdev->share_umv_size > 0)
7291 hdev->share_umv_size--;
7292 vport->used_umv_num++;
7293 }
7294}
7295
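/* look up @mac_addr in @list; caller must hold vport->mac_list_lock */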
7296static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7297 const u8 *mac_addr)
7298{
7299 struct hclge_mac_node *mac_node, *tmp;
7300
7301 list_for_each_entry_safe(mac_node, tmp, list, node)
7302 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7303 return mac_node;
7304
7305 return NULL;
7306}
7307
7308static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7309 enum HCLGE_MAC_NODE_STATE state)
7310{
7311 switch (state) {
	/* from set_rx_mode or tmp_add_list */
7313 case HCLGE_MAC_TO_ADD:
7314 if (mac_node->state == HCLGE_MAC_TO_DEL)
7315 mac_node->state = HCLGE_MAC_ACTIVE;
7316 break;
	/* only from set_rx_mode */
7318 case HCLGE_MAC_TO_DEL:
7319 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7320 list_del(&mac_node->node);
7321 kfree(mac_node);
7322 } else {
7323 mac_node->state = HCLGE_MAC_TO_DEL;
7324 }
7325 break;
	/* only from tmp_add_list, the mac_node->state won't be
	 * ACTIVE
	 */
7329 case HCLGE_MAC_ACTIVE:
7330 if (mac_node->state == HCLGE_MAC_TO_ADD)
7331 mac_node->state = HCLGE_MAC_ACTIVE;
7332
7333 break;
7334 }
7335}
7336
7337int hclge_update_mac_list(struct hclge_vport *vport,
7338 enum HCLGE_MAC_NODE_STATE state,
7339 enum HCLGE_MAC_ADDR_TYPE mac_type,
7340 const unsigned char *addr)
7341{
7342 struct hclge_dev *hdev = vport->back;
7343 struct hclge_mac_node *mac_node;
7344 struct list_head *list;
7345
7346 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7347 &vport->uc_mac_list : &vport->mc_mac_list;
7348
7349 spin_lock_bh(&vport->mac_list_lock);
7350
	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing
	 */
7355 mac_node = hclge_find_mac_node(list, addr);
7356 if (mac_node) {
7357 hclge_update_mac_node(mac_node, state);
7358 spin_unlock_bh(&vport->mac_list_lock);
7359 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7360 return 0;
7361 }
7362
	/* if this address is never added, unnecessary to delete */
7364 if (state == HCLGE_MAC_TO_DEL) {
7365 spin_unlock_bh(&vport->mac_list_lock);
7366 dev_err(&hdev->pdev->dev,
7367 "failed to delete address %pM from mac list\n",
7368 addr);
7369 return -ENOENT;
7370 }
7371
7372 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7373 if (!mac_node) {
7374 spin_unlock_bh(&vport->mac_list_lock);
7375 return -ENOMEM;
7376 }
7377
7378 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7379
7380 mac_node->state = state;
7381 ether_addr_copy(mac_node->mac_addr, addr);
7382 list_add_tail(&mac_node->node, list);
7383
7384 spin_unlock_bh(&vport->mac_list_lock);
7385
7386 return 0;
7387}
7388
7389static int hclge_add_uc_addr(struct hnae3_handle *handle,
7390 const unsigned char *addr)
7391{
7392 struct hclge_vport *vport = hclge_get_vport(handle);
7393
7394 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7395 addr);
7396}
7397
7398int hclge_add_uc_addr_common(struct hclge_vport *vport,
7399 const unsigned char *addr)
7400{
7401 struct hclge_dev *hdev = vport->back;
7402 struct hclge_mac_vlan_tbl_entry_cmd req;
7403 struct hclge_desc desc;
7404 u16 egress_port = 0;
7405 int ret;
7406
	/* mac addr check */
7408 if (is_zero_ether_addr(addr) ||
7409 is_broadcast_ether_addr(addr) ||
7410 is_multicast_ether_addr(addr)) {
7411 dev_err(&hdev->pdev->dev,
7412 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7413 addr, is_zero_ether_addr(addr),
7414 is_broadcast_ether_addr(addr),
7415 is_multicast_ether_addr(addr));
7416 return -EINVAL;
7417 }
7418
7419 memset(&req, 0, sizeof(req));
7420
7421 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7422 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7423
7424 req.egress_port = cpu_to_le16(egress_port);
7425
7426 hclge_prepare_mac_addr(&req, addr, false);
7427
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
7432 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7433 if (ret == -ENOENT) {
7434 mutex_lock(&hdev->vport_lock);
7435 if (!hclge_is_umv_space_full(vport, false)) {
7436 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7437 if (!ret)
7438 hclge_update_umv_space(vport, false);
7439 mutex_unlock(&hdev->vport_lock);
7440 return ret;
7441 }
7442 mutex_unlock(&hdev->vport_lock);
7443
7444 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7445 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7446 hdev->priv_umv_size);
7447
7448 return -ENOSPC;
7449 }
7450
	/* check if we just hit the duplicate */
7452 if (!ret) {
7453 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7454 vport->vport_id, addr);
7455 return 0;
7456 }
7457
7458 dev_err(&hdev->pdev->dev,
7459 "PF failed to add unicast entry(%pM) in the MAC table\n",
7460 addr);
7461
7462 return ret;
7463}
7464
7465static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7466 const unsigned char *addr)
7467{
7468 struct hclge_vport *vport = hclge_get_vport(handle);
7469
7470 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7471 addr);
7472}
7473
7474int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7475 const unsigned char *addr)
7476{
7477 struct hclge_dev *hdev = vport->back;
7478 struct hclge_mac_vlan_tbl_entry_cmd req;
7479 int ret;
7480
	/* mac addr check */
7482 if (is_zero_ether_addr(addr) ||
7483 is_broadcast_ether_addr(addr) ||
7484 is_multicast_ether_addr(addr)) {
7485 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7486 addr);
7487 return -EINVAL;
7488 }
7489
7490 memset(&req, 0, sizeof(req));
7491 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7492 hclge_prepare_mac_addr(&req, addr, false);
7493 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7494 if (!ret) {
7495 mutex_lock(&hdev->vport_lock);
7496 hclge_update_umv_space(vport, true);
7497 mutex_unlock(&hdev->vport_lock);
7498 } else if (ret == -ENOENT) {
7499 ret = 0;
7500 }
7501
7502 return ret;
7503}
7504
7505static int hclge_add_mc_addr(struct hnae3_handle *handle,
7506 const unsigned char *addr)
7507{
7508 struct hclge_vport *vport = hclge_get_vport(handle);
7509
7510 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7511 addr);
7512}
7513
7514int hclge_add_mc_addr_common(struct hclge_vport *vport,
7515 const unsigned char *addr)
7516{
7517 struct hclge_dev *hdev = vport->back;
7518 struct hclge_mac_vlan_tbl_entry_cmd req;
7519 struct hclge_desc desc[3];
7520 int status;
7521
	/* mac addr check */
7523 if (!is_multicast_ether_addr(addr)) {
7524 dev_err(&hdev->pdev->dev,
7525 "Add mc mac err! invalid mac:%pM.\n",
7526 addr);
7527 return -EINVAL;
7528 }
7529 memset(&req, 0, sizeof(req));
7530 hclge_prepare_mac_addr(&req, addr, true);
7531 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7532 if (status) {
		/* This mac addr does not exist, add a new entry for it */
7534 memset(desc[0].data, 0, sizeof(desc[0].data));
7535 memset(desc[1].data, 0, sizeof(desc[0].data));
7536 memset(desc[2].data, 0, sizeof(desc[0].data));
7537 }
7538 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7539 if (status)
7540 return status;
7541 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7542
	/* if the table has already overflowed, do not warn again each time */
7544 if (status == -ENOSPC &&
7545 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7546 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7547
7548 return status;
7549}
7550
7551static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7552 const unsigned char *addr)
7553{
7554 struct hclge_vport *vport = hclge_get_vport(handle);
7555
7556 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7557 addr);
7558}
7559
7560int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7561 const unsigned char *addr)
7562{
7563 struct hclge_dev *hdev = vport->back;
7564 struct hclge_mac_vlan_tbl_entry_cmd req;
7565 enum hclge_cmd_status status;
7566 struct hclge_desc desc[3];
7567
	/* mac addr check */
7569 if (!is_multicast_ether_addr(addr)) {
7570 dev_dbg(&hdev->pdev->dev,
7571 "Remove mc mac err! invalid mac:%pM.\n",
7572 addr);
7573 return -EINVAL;
7574 }
7575
7576 memset(&req, 0, sizeof(req));
7577 hclge_prepare_mac_addr(&req, addr, true);
7578 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7579 if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
7581 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7582 if (status)
7583 return status;
7584
7585 if (hclge_is_all_function_id_zero(desc))
			/* All the vfid is zero, so need to delete this entry */
7587 status = hclge_remove_mac_vlan_tbl(vport, &req);
7588 else
			/* Not all the vfid is zero, update the vfid */
7590 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7591
7592 } else if (status == -ENOENT) {
7593 status = 0;
7594 }
7595
7596 return status;
7597}
7598
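/* write the addresses on @list to hardware with @sync; stop at the first
 * failure and mark the table changed so the service task retries later
 */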
7599static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7600 struct list_head *list,
7601 int (*sync)(struct hclge_vport *,
7602 const unsigned char *))
7603{
7604 struct hclge_mac_node *mac_node, *tmp;
7605 int ret;
7606
7607 list_for_each_entry_safe(mac_node, tmp, list, node) {
7608 ret = sync(vport, mac_node->mac_addr);
7609 if (!ret) {
7610 mac_node->state = HCLGE_MAC_ACTIVE;
7611 } else {
7612 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7613 &vport->state);
7614 break;
7615 }
7616 }
7617}
7618
7619static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7620 struct list_head *list,
7621 int (*unsync)(struct hclge_vport *,
7622 const unsigned char *))
7623{
7624 struct hclge_mac_node *mac_node, *tmp;
7625 int ret;
7626
7627 list_for_each_entry_safe(mac_node, tmp, list, node) {
7628 ret = unsync(vport, mac_node->mac_addr);
7629 if (!ret || ret == -ENOENT) {
7630 list_del(&mac_node->node);
7631 kfree(mac_node);
7632 } else {
7633 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7634 &vport->state);
7635 break;
7636 }
7637 }
7638}
7639
7640static bool hclge_sync_from_add_list(struct list_head *add_list,
7641 struct list_head *mac_list)
7642{
7643 struct hclge_mac_node *mac_node, *tmp, *new_node;
7644 bool all_added = true;
7645
7646 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7647 if (mac_node->state == HCLGE_MAC_TO_ADD)
7648 all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. if mac_node state is ACTIVE, then change it to
		 * TO_DEL, then it will be removed at next time. else it must
		 * be TO_ADD, this address hasn't been added into the mac
		 * table, so just remove the mac node.
		 */
7658 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7659 if (new_node) {
7660 hclge_update_mac_node(new_node, mac_node->state);
7661 list_del(&mac_node->node);
7662 kfree(mac_node);
7663 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7664 mac_node->state = HCLGE_MAC_TO_DEL;
7665 list_del(&mac_node->node);
7666 list_add_tail(&mac_node->node, mac_list);
7667 } else {
7668 list_del(&mac_node->node);
7669 kfree(mac_node);
7670 }
7671 }
7672
7673 return all_added;
7674}
7675
7676static void hclge_sync_from_del_list(struct list_head *del_list,
7677 struct list_head *mac_list)
7678{
7679 struct hclge_mac_node *mac_node, *tmp, *new_node;
7680
7681 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7682 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7683 if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * a new TO_ADD request was received during the time
			 * window of configuring the mac address. Since the
			 * mac node state is TO_ADD, and the address is still
			 * in the hardware (due to delete fail), we just need
			 * to change the mac node state to ACTIVE.
			 */
7691 new_node->state = HCLGE_MAC_ACTIVE;
7692 list_del(&mac_node->node);
7693 kfree(mac_node);
7694 } else {
7695 list_del(&mac_node->node);
7696 list_add_tail(&mac_node->node, mac_list);
7697 }
7698 }
7699}
7700
7701static void hclge_update_overflow_flags(struct hclge_vport *vport,
7702 enum HCLGE_MAC_ADDR_TYPE mac_type,
7703 bool is_all_added)
7704{
7705 if (mac_type == HCLGE_MAC_ADDR_UC) {
7706 if (is_all_added)
7707 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7708 else
7709 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7710 } else {
7711 if (is_all_added)
7712 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7713 else
7714 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7715 }
7716}
7717
7718static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7719 enum HCLGE_MAC_ADDR_TYPE mac_type)
7720{
7721 struct hclge_mac_node *mac_node, *tmp, *new_node;
7722 struct list_head tmp_add_list, tmp_del_list;
7723 struct list_head *list;
7724 bool all_added;
7725
7726 INIT_LIST_HEAD(&tmp_add_list);
7727 INIT_LIST_HEAD(&tmp_del_list);
7728
	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
7732 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7733 &vport->uc_mac_list : &vport->mc_mac_list;
7734
7735 spin_lock_bh(&vport->mac_list_lock);
7736
7737 list_for_each_entry_safe(mac_node, tmp, list, node) {
7738 switch (mac_node->state) {
7739 case HCLGE_MAC_TO_DEL:
7740 list_del(&mac_node->node);
7741 list_add_tail(&mac_node->node, &tmp_del_list);
7742 break;
7743 case HCLGE_MAC_TO_ADD:
7744 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7745 if (!new_node)
7746 goto stop_traverse;
7747 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7748 new_node->state = mac_node->state;
7749 list_add_tail(&new_node->node, &tmp_add_list);
7750 break;
7751 default:
7752 break;
7753 }
7754 }
7755
7756stop_traverse:
7757 spin_unlock_bh(&vport->mac_list_lock);
7758
	/* delete first, in order to get max mac table space for adding */
7760 if (mac_type == HCLGE_MAC_ADDR_UC) {
7761 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7762 hclge_rm_uc_addr_common);
7763 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7764 hclge_add_uc_addr_common);
7765 } else {
7766 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7767 hclge_rm_mc_addr_common);
7768 hclge_sync_vport_mac_list(vport, &tmp_add_list,
7769 hclge_add_mc_addr_common);
7770 }
7771
	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time
	 */
7775 spin_lock_bh(&vport->mac_list_lock);
7776
7777 hclge_sync_from_del_list(&tmp_del_list, list);
7778 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7779
7780 spin_unlock_bh(&vport->mac_list_lock);
7781
7782 hclge_update_overflow_flags(vport, mac_type, all_added);
7783}
7784
7785static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7786{
7787 struct hclge_dev *hdev = vport->back;
7788
7789 if (test_bit(vport->vport_id, hdev->vport_config_block))
7790 return false;
7791
7792 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7793 return true;
7794
7795 return false;
7796}
7797
7798static void hclge_sync_mac_table(struct hclge_dev *hdev)
7799{
7800 int i;
7801
7802 for (i = 0; i < hdev->num_alloc_vport; i++) {
7803 struct hclge_vport *vport = &hdev->vport[i];
7804
7805 if (!hclge_need_sync_mac_table(vport))
7806 continue;
7807
7808 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7809 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7810 }
7811}
7812
7813void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7814 enum HCLGE_MAC_ADDR_TYPE mac_type)
7815{
7816 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7817 struct hclge_mac_node *mac_cfg, *tmp;
7818 struct hclge_dev *hdev = vport->back;
7819 struct list_head tmp_del_list, *list;
7820 int ret;
7821
7822 if (mac_type == HCLGE_MAC_ADDR_UC) {
7823 list = &vport->uc_mac_list;
7824 unsync = hclge_rm_uc_addr_common;
7825 } else {
7826 list = &vport->mc_mac_list;
7827 unsync = hclge_rm_mc_addr_common;
7828 }
7829
7830 INIT_LIST_HEAD(&tmp_del_list);
7831
7832 if (!is_del_list)
7833 set_bit(vport->vport_id, hdev->vport_config_block);
7834
7835 spin_lock_bh(&vport->mac_list_lock);
7836
7837 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7838 switch (mac_cfg->state) {
7839 case HCLGE_MAC_TO_DEL:
7840 case HCLGE_MAC_ACTIVE:
7841 list_del(&mac_cfg->node);
7842 list_add_tail(&mac_cfg->node, &tmp_del_list);
7843 break;
7844 case HCLGE_MAC_TO_ADD:
7845 if (is_del_list) {
7846 list_del(&mac_cfg->node);
7847 kfree(mac_cfg);
7848 }
7849 break;
7850 }
7851 }
7852
7853 spin_unlock_bh(&vport->mac_list_lock);
7854
7855 list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7856 ret = unsync(vport, mac_cfg->mac_addr);
7857 if (!ret || ret == -ENOENT) {
			/* clear all mac addr from hardware, but remain these
			 * mac addr in the mac list, and restore them after
			 * vf reset finished.
			 */
7862 if (!is_del_list &&
7863 mac_cfg->state == HCLGE_MAC_ACTIVE) {
7864 mac_cfg->state = HCLGE_MAC_TO_ADD;
7865 } else {
7866 list_del(&mac_cfg->node);
7867 kfree(mac_cfg);
7868 }
7869 } else if (is_del_list) {
7870 mac_cfg->state = HCLGE_MAC_TO_DEL;
7871 }
7872 }
7873
7874 spin_lock_bh(&vport->mac_list_lock);
7875
7876 hclge_sync_from_del_list(&tmp_del_list, list);
7877
7878 spin_unlock_bh(&vport->mac_list_lock);
7879}
7880
7882static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7883 enum HCLGE_MAC_ADDR_TYPE mac_type)
7884{
7885 struct hclge_mac_node *mac_node, *tmp;
7886 struct hclge_dev *hdev = vport->back;
7887 struct list_head tmp_del_list, *list;
7888
7889 INIT_LIST_HEAD(&tmp_del_list);
7890
7891 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7892 &vport->uc_mac_list : &vport->mc_mac_list;
7893
7894 spin_lock_bh(&vport->mac_list_lock);
7895
7896 list_for_each_entry_safe(mac_node, tmp, list, node) {
7897 switch (mac_node->state) {
7898 case HCLGE_MAC_TO_DEL:
7899 case HCLGE_MAC_ACTIVE:
7900 list_del(&mac_node->node);
7901 list_add_tail(&mac_node->node, &tmp_del_list);
7902 break;
7903 case HCLGE_MAC_TO_ADD:
7904 list_del(&mac_node->node);
7905 kfree(mac_node);
7906 break;
7907 }
7908 }
7909
7910 spin_unlock_bh(&vport->mac_list_lock);
7911
7912 if (mac_type == HCLGE_MAC_ADDR_UC)
7913 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7914 hclge_rm_uc_addr_common);
7915 else
7916 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7917 hclge_rm_mc_addr_common);
7918
7919 if (!list_empty(&tmp_del_list))
7920 dev_warn(&hdev->pdev->dev,
7921 "uninit %s mac list for vport %u not completely.\n",
7922 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7923 vport->vport_id);
7924
7925 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7926 list_del(&mac_node->node);
7927 kfree(mac_node);
7928 }
7929}
7930
7931static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7932{
7933 struct hclge_vport *vport;
7934 int i;
7935
7936 for (i = 0; i < hdev->num_alloc_vport; i++) {
7937 vport = &hdev->vport[i];
7938 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7939 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7940 }
7941}
7942
7943static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7944 u16 cmdq_resp, u8 resp_code)
7945{
7946#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7947#define HCLGE_ETHERTYPE_ALREADY_ADD 1
7948#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7949#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7950
7951 int return_status;
7952
7953 if (cmdq_resp) {
7954 dev_err(&hdev->pdev->dev,
7955 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7956 cmdq_resp);
7957 return -EIO;
7958 }
7959
7960 switch (resp_code) {
7961 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7962 case HCLGE_ETHERTYPE_ALREADY_ADD:
7963 return_status = 0;
7964 break;
7965 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7966 dev_err(&hdev->pdev->dev,
7967 "add mac ethertype failed for manager table overflow.\n");
7968 return_status = -EIO;
7969 break;
7970 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7971 dev_err(&hdev->pdev->dev,
7972 "add mac ethertype failed for key conflict.\n");
7973 return_status = -EIO;
7974 break;
7975 default:
7976 dev_err(&hdev->pdev->dev,
7977 "add mac ethertype failed for undefined, code=%u.\n",
7978 resp_code);
7979 return_status = -EIO;
7980 }
7981
7982 return return_status;
7983}
7984
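/* a VF MAC is considered in use if it is already in the hardware MAC VLAN
 * table or has been assigned to another VF
 */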
7985static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7986 u8 *mac_addr)
7987{
7988 struct hclge_mac_vlan_tbl_entry_cmd req;
7989 struct hclge_dev *hdev = vport->back;
7990 struct hclge_desc desc;
7991 u16 egress_port = 0;
7992 int i;
7993
7994 if (is_zero_ether_addr(mac_addr))
7995 return false;
7996
7997 memset(&req, 0, sizeof(req));
7998 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7999 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8000 req.egress_port = cpu_to_le16(egress_port);
8001 hclge_prepare_mac_addr(&req, mac_addr, false);
8002
8003 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8004 return true;
8005
8006 vf_idx += HCLGE_VF_VPORT_START_NUM;
8007 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8008 if (i != vf_idx &&
8009 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8010 return true;
8011
8012 return false;
8013}
8014
8015static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8016 u8 *mac_addr)
8017{
8018 struct hclge_vport *vport = hclge_get_vport(handle);
8019 struct hclge_dev *hdev = vport->back;
8020
8021 vport = hclge_get_vf_vport(hdev, vf);
8022 if (!vport)
8023 return -EINVAL;
8024
8025 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8026 dev_info(&hdev->pdev->dev,
8027 "Specified MAC(=%pM) is same as before, no change committed!\n",
8028 mac_addr);
8029 return 0;
8030 }
8031
8032 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8033 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8034 mac_addr);
8035 return -EEXIST;
8036 }
8037
8038 ether_addr_copy(vport->vf_info.mac, mac_addr);
8039
8040 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8041 dev_info(&hdev->pdev->dev,
8042 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8043 vf, mac_addr);
8044 return hclge_inform_reset_assert_to_vf(vport);
8045 }
8046
8047 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8048 vf, mac_addr);
8049 return 0;
8050}
8051
8052static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8053 const struct hclge_mac_mgr_tbl_entry_cmd *req)
8054{
8055 struct hclge_desc desc;
8056 u8 resp_code;
8057 u16 retval;
8058 int ret;
8059
8060 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8061 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8062
8063 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8064 if (ret) {
8065 dev_err(&hdev->pdev->dev,
8066 "add mac ethertype failed for cmd_send, ret =%d.\n",
8067 ret);
8068 return ret;
8069 }
8070
8071 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8072 retval = le16_to_cpu(desc.retval);
8073
8074 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8075}
8076
8077static int init_mgr_tbl(struct hclge_dev *hdev)
8078{
8079 int ret;
8080 int i;
8081
8082 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8083 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8084 if (ret) {
8085 dev_err(&hdev->pdev->dev,
8086 "add mac ethertype failed, ret =%d.\n",
8087 ret);
8088 return ret;
8089 }
8090 }
8091
8092 return 0;
8093}
8094
8095static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8096{
8097 struct hclge_vport *vport = hclge_get_vport(handle);
8098 struct hclge_dev *hdev = vport->back;
8099
8100 ether_addr_copy(p, hdev->hw.mac.mac_addr);
8101}
8102
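/* Update the unicast MAC list when the device address changes: queue the new
 * address for addition and the old one for deletion; the service task syncs
 * the list to hardware later. Caller must hold vport->mac_list_lock.
 */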
8103int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8104 const u8 *old_addr, const u8 *new_addr)
8105{
8106 struct list_head *list = &vport->uc_mac_list;
8107 struct hclge_mac_node *old_node, *new_node;
8108
8109 new_node = hclge_find_mac_node(list, new_addr);
8110 if (!new_node) {
8111 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8112 if (!new_node)
8113 return -ENOMEM;
8114
8115 new_node->state = HCLGE_MAC_TO_ADD;
8116 ether_addr_copy(new_node->mac_addr, new_addr);
8117 list_add(&new_node->node, list);
8118 } else {
8119 if (new_node->state == HCLGE_MAC_TO_DEL)
8120 new_node->state = HCLGE_MAC_ACTIVE;

		/* keep the new addr at the head of the list, so the dev addr
		 * cannot be removed by a concurrent mac table update
		 */
8127 list_move(&new_node->node, list);
8128 }
8129
8130 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8131 old_node = hclge_find_mac_node(list, old_addr);
8132 if (old_node) {
8133 if (old_node->state == HCLGE_MAC_TO_ADD) {
8134 list_del(&old_node->node);
8135 kfree(old_node);
8136 } else {
8137 old_node->state = HCLGE_MAC_TO_DEL;
8138 }
8139 }
8140 }
8141
8142 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8143
8144 return 0;
8145}
8146
8147static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8148 bool is_first)
8149{
8150 const unsigned char *new_addr = (const unsigned char *)p;
8151 struct hclge_vport *vport = hclge_get_vport(handle);
8152 struct hclge_dev *hdev = vport->back;
8153 unsigned char *old_addr = NULL;
8154 int ret;
8155
	/* mac addr check */
8157 if (is_zero_ether_addr(new_addr) ||
8158 is_broadcast_ether_addr(new_addr) ||
8159 is_multicast_ether_addr(new_addr)) {
8160 dev_err(&hdev->pdev->dev,
8161 "change uc mac err! invalid mac: %pM.\n",
8162 new_addr);
8163 return -EINVAL;
8164 }
8165
8166 ret = hclge_pause_addr_cfg(hdev, new_addr);
8167 if (ret) {
8168 dev_err(&hdev->pdev->dev,
8169 "failed to configure mac pause address, ret = %d\n",
8170 ret);
8171 return ret;
8172 }
8173
8174 if (!is_first)
8175 old_addr = hdev->hw.mac.mac_addr;
8176
8177 spin_lock_bh(&vport->mac_list_lock);
8178 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8179 if (ret) {
8180 dev_err(&hdev->pdev->dev,
8181 "failed to change the mac addr:%pM, ret = %d\n",
8182 new_addr, ret);
8183 spin_unlock_bh(&vport->mac_list_lock);
8184
8185 if (!is_first)
8186 hclge_pause_addr_cfg(hdev, old_addr);
8187
8188 return ret;
8189 }
8190
	/* we must update the dev addr with the spin lock held, preventing
	 * the dev addr from being removed by the set_rx_mode path
	 */
8193 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8194 spin_unlock_bh(&vport->mac_list_lock);
8195
8196 hclge_task_schedule(hdev, 0);
8197
8198 return 0;
8199}
8200
8201static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8202 int cmd)
8203{
8204 struct hclge_vport *vport = hclge_get_vport(handle);
8205 struct hclge_dev *hdev = vport->back;
8206
8207 if (!hdev->hw.mac.phydev)
8208 return -EOPNOTSUPP;
8209
8210 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8211}
8212
8213static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8214 u8 fe_type, bool filter_en, u8 vf_id)
8215{
8216 struct hclge_vlan_filter_ctrl_cmd *req;
8217 struct hclge_desc desc;
8218 int ret;
8219
	/* read current vlan filter parameter */
8221 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8222 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8223 req->vlan_type = vlan_type;
8224 req->vf_id = vf_id;
8225
8226 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8227 if (ret) {
8228 dev_err(&hdev->pdev->dev,
8229 "failed to get vlan filter config, ret = %d.\n", ret);
8230 return ret;
8231 }
8232
	/* modify and write new config parameter */
8234 hclge_cmd_reuse_desc(&desc, false);
8235 req->vlan_fe = filter_en ?
8236 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8237
8238 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8239 if (ret)
8240 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8241 ret);
8242
8243 return ret;
8244}
8245
8246#define HCLGE_FILTER_TYPE_VF 0
8247#define HCLGE_FILTER_TYPE_PORT 1
8248#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
8249#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
8250#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
8251#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
8252#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
8253#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
8254 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8255#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
8256 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8257
8258static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8259{
8260 struct hclge_vport *vport = hclge_get_vport(handle);
8261 struct hclge_dev *hdev = vport->back;
8262
8263 if (hdev->pdev->revision >= 0x21) {
8264 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8265 HCLGE_FILTER_FE_EGRESS, enable, 0);
8266 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8267 HCLGE_FILTER_FE_INGRESS, enable, 0);
8268 } else {
8269 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8270 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8271 0);
8272 }
8273 if (enable)
8274 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8275 else
8276 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8277}
8278
8279static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8280 bool is_kill, u16 vlan,
8281 __be16 proto)
8282{
8283 struct hclge_vport *vport = &hdev->vport[vfid];
8284 struct hclge_vlan_filter_vf_cfg_cmd *req0;
8285 struct hclge_vlan_filter_vf_cfg_cmd *req1;
8286 struct hclge_desc desc[2];
8287 u8 vf_byte_val;
8288 u8 vf_byte_off;
8289 int ret;
8290
	/* if the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is unable and unnecessary to add a new vlan id to
	 * the vf vlan filter. If spoof check is enabled and the vf vlan
	 * table is full, adding a new vlan would cause tx packets with that
	 * vlan id to be dropped, so reject it.
	 */
8296 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8297 if (vport->vf_info.spoofchk && vlan) {
8298 dev_err(&hdev->pdev->dev,
8299 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8300 return -EPERM;
8301 }
8302 return 0;
8303 }
8304
8305 hclge_cmd_setup_basic_desc(&desc[0],
8306 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8307 hclge_cmd_setup_basic_desc(&desc[1],
8308 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8309
8310 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8311
8312 vf_byte_off = vfid / 8;
8313 vf_byte_val = 1 << (vfid % 8);
8314
8315 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8316 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8317
8318 req0->vlan_id = cpu_to_le16(vlan);
8319 req0->vlan_cfg = is_kill;
8320
8321 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8322 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8323 else
8324 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8325
8326 ret = hclge_cmd_send(&hdev->hw, desc, 2);
8327 if (ret) {
8328 dev_err(&hdev->pdev->dev,
8329 "Send vf vlan command fail, ret =%d.\n",
8330 ret);
8331 return ret;
8332 }
8333
8334 if (!is_kill) {
8335#define HCLGE_VF_VLAN_NO_ENTRY 2
8336 if (!req0->resp_code || req0->resp_code == 1)
8337 return 0;
8338
8339 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8340 set_bit(vfid, hdev->vf_vlan_full);
8341 dev_warn(&hdev->pdev->dev,
8342 "vf vlan table is full, vf vlan filter is disabled\n");
8343 return 0;
8344 }
8345
8346 dev_err(&hdev->pdev->dev,
8347 "Add vf vlan filter fail, ret =%u.\n",
8348 req0->resp_code);
8349 } else {
8350#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8351 if (!req0->resp_code)
8352 return 0;
8353
		/* vf vlan filter is disabled when the vf vlan table is full,
		 * so a new vlan id was never added into the vf vlan table.
		 * Just return 0 without warning, to avoid massive verbose
		 * print logs when unloading.
		 */
8359 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8360 return 0;
8361
8362 dev_err(&hdev->pdev->dev,
8363 "Kill vf vlan filter fail, ret =%u.\n",
8364 req0->resp_code);
8365 }
8366
8367 return -EIO;
8368}
8369
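/* The PF VLAN filter command addresses VLAN ids as a bitmap: each descriptor
 * covers HCLGE_VLAN_ID_OFFSET_STEP (160) ids, split into bytes of
 * HCLGE_VLAN_BYTE_SIZE (8) bits. For example, vlan id 200 maps to offset 1,
 * byte 5, bit 0.
 */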
8370static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8371 u16 vlan_id, bool is_kill)
8372{
8373 struct hclge_vlan_filter_pf_cfg_cmd *req;
8374 struct hclge_desc desc;
8375 u8 vlan_offset_byte_val;
8376 u8 vlan_offset_byte;
8377 u8 vlan_offset_160;
8378 int ret;
8379
8380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8381
8382 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8383 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8384 HCLGE_VLAN_BYTE_SIZE;
8385 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8386
8387 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8388 req->vlan_offset = vlan_offset_160;
8389 req->vlan_cfg = is_kill;
8390 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8391
8392 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8393 if (ret)
8394 dev_err(&hdev->pdev->dev,
8395 "port vlan command, send fail, ret =%d.\n", ret);
8396 return ret;
8397}
8398
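/* Program a VLAN filter entry for a vport: update the per-VF filter first,
 * then track membership in hdev->vlan_table so that the port level filter is
 * only written when the first vport joins or the last vport leaves the vlan.
 */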
8399static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8400 u16 vport_id, u16 vlan_id,
8401 bool is_kill)
8402{
8403 u16 vport_idx, vport_num = 0;
8404 int ret;
8405
8406 if (is_kill && !vlan_id)
8407 return 0;
8408
8409 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8410 proto);
8411 if (ret) {
8412 dev_err(&hdev->pdev->dev,
8413 "Set %u vport vlan filter config fail, ret =%d.\n",
8414 vport_id, ret);
8415 return ret;
8416 }
8417
	/* vlan 0 may be added twice when 8021q module is enabled */
8419 if (!is_kill && !vlan_id &&
8420 test_bit(vport_id, hdev->vlan_table[vlan_id]))
8421 return 0;
8422
8423 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8424 dev_err(&hdev->pdev->dev,
8425 "Add port vlan failed, vport %u is already in vlan %u\n",
8426 vport_id, vlan_id);
8427 return -EINVAL;
8428 }
8429
8430 if (is_kill &&
8431 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8432 dev_err(&hdev->pdev->dev,
8433 "Delete port vlan failed, vport %u is not in vlan %u\n",
8434 vport_id, vlan_id);
8435 return -EINVAL;
8436 }
8437
8438 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8439 vport_num++;
8440
8441 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8442 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8443 is_kill);
8444
8445 return ret;
8446}
8447
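/* Push the vport's TX VLAN tag configuration to hardware. The command
 * addresses vports through vf_offset/vf_bitmap: HCLGE_VF_NUM_PER_CMD vports
 * per command, grouped into bytes of HCLGE_VF_NUM_PER_BYTE bits.
 */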
8448static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8449{
8450 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8451 struct hclge_vport_vtag_tx_cfg_cmd *req;
8452 struct hclge_dev *hdev = vport->back;
8453 struct hclge_desc desc;
8454 u16 bmap_index;
8455 int status;
8456
8457 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8458
8459 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8460 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8461 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8462 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8463 vcfg->accept_tag1 ? 1 : 0);
8464 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8465 vcfg->accept_untag1 ? 1 : 0);
8466 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8467 vcfg->accept_tag2 ? 1 : 0);
8468 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8469 vcfg->accept_untag2 ? 1 : 0);
8470 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8471 vcfg->insert_tag1_en ? 1 : 0);
8472 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8473 vcfg->insert_tag2_en ? 1 : 0);
8474 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8475
8476 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8477 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8478 HCLGE_VF_NUM_PER_BYTE;
8479 req->vf_bitmap[bmap_index] =
8480 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8481
8482 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8483 if (status)
8484 dev_err(&hdev->pdev->dev,
8485 "Send port txvlan cfg command fail, ret =%d\n",
8486 status);
8487
8488 return status;
8489}
8490
8491static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8492{
8493 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8494 struct hclge_vport_vtag_rx_cfg_cmd *req;
8495 struct hclge_dev *hdev = vport->back;
8496 struct hclge_desc desc;
8497 u16 bmap_index;
8498 int status;
8499
8500 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8501
8502 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8503 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8504 vcfg->strip_tag1_en ? 1 : 0);
8505 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8506 vcfg->strip_tag2_en ? 1 : 0);
8507 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8508 vcfg->vlan1_vlan_prionly ? 1 : 0);
8509 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8510 vcfg->vlan2_vlan_prionly ? 1 : 0);
8511
8512 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8513 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8514 HCLGE_VF_NUM_PER_BYTE;
8515 req->vf_bitmap[bmap_index] =
8516 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8517
8518 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8519 if (status)
8520 dev_err(&hdev->pdev->dev,
8521 "Send port rxvlan cfg command fail, ret =%d\n",
8522 status);
8523
8524 return status;
8525}
8526
8527static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8528 u16 port_base_vlan_state,
8529 u16 vlan_tag)
8530{
8531 int ret;
8532
8533 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8534 vport->txvlan_cfg.accept_tag1 = true;
8535 vport->txvlan_cfg.insert_tag1_en = false;
8536 vport->txvlan_cfg.default_tag1 = 0;
8537 } else {
8538 vport->txvlan_cfg.accept_tag1 = false;
8539 vport->txvlan_cfg.insert_tag1_en = true;
8540 vport->txvlan_cfg.default_tag1 = vlan_tag;
8541 }
8542
8543 vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, but
	 * these two fields can not be configured by the user.
	 */
8549 vport->txvlan_cfg.accept_tag2 = true;
8550 vport->txvlan_cfg.accept_untag2 = true;
8551 vport->txvlan_cfg.insert_tag2_en = false;
8552 vport->txvlan_cfg.default_tag2 = 0;
8553
8554 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8555 vport->rxvlan_cfg.strip_tag1_en = false;
8556 vport->rxvlan_cfg.strip_tag2_en =
8557 vport->rxvlan_cfg.rx_vlan_offload_en;
8558 } else {
8559 vport->rxvlan_cfg.strip_tag1_en =
8560 vport->rxvlan_cfg.rx_vlan_offload_en;
8561 vport->rxvlan_cfg.strip_tag2_en = true;
8562 }
8563 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8564 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8565
8566 ret = hclge_set_vlan_tx_offload_cfg(vport);
8567 if (ret)
8568 return ret;
8569
8570 return hclge_set_vlan_rx_offload_cfg(vport);
8571}
8572
8573static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8574{
8575 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8576 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8577 struct hclge_desc desc;
8578 int status;
8579
8580 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8581 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8582 rx_req->ot_fst_vlan_type =
8583 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8584 rx_req->ot_sec_vlan_type =
8585 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8586 rx_req->in_fst_vlan_type =
8587 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8588 rx_req->in_sec_vlan_type =
8589 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8590
8591 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8592 if (status) {
8593 dev_err(&hdev->pdev->dev,
8594 "Send rxvlan protocol type command fail, ret =%d\n",
8595 status);
8596 return status;
8597 }
8598
8599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8600
8601 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8602 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8603 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8604
8605 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8606 if (status)
8607 dev_err(&hdev->pdev->dev,
8608 "Send txvlan protocol type command fail, ret =%d\n",
8609 status);
8610
8611 return status;
8612}
8613
8614static int hclge_init_vlan_config(struct hclge_dev *hdev)
8615{
8616#define HCLGE_DEF_VLAN_TYPE 0x8100
8617
8618 struct hnae3_handle *handle = &hdev->vport[0].nic;
8619 struct hclge_vport *vport;
8620 int ret;
8621 int i;
8622
8623 if (hdev->pdev->revision >= 0x21) {
		/* for revision 0x21, vf vlan filter is per function */
8625 for (i = 0; i < hdev->num_alloc_vport; i++) {
8626 vport = &hdev->vport[i];
8627 ret = hclge_set_vlan_filter_ctrl(hdev,
8628 HCLGE_FILTER_TYPE_VF,
8629 HCLGE_FILTER_FE_EGRESS,
8630 true,
8631 vport->vport_id);
8632 if (ret)
8633 return ret;
8634 }
8635
8636 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8637 HCLGE_FILTER_FE_INGRESS, true,
8638 0);
8639 if (ret)
8640 return ret;
8641 } else {
8642 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8643 HCLGE_FILTER_FE_EGRESS_V1_B,
8644 true, 0);
8645 if (ret)
8646 return ret;
8647 }
8648
8649 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8650
8651 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8652 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8653 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8654 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8655 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8656 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8657
8658 ret = hclge_set_vlan_protocol_type(hdev);
8659 if (ret)
8660 return ret;
8661
8662 for (i = 0; i < hdev->num_alloc_vport; i++) {
8663 u16 vlan_tag;
8664
8665 vport = &hdev->vport[i];
8666 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8667
8668 ret = hclge_vlan_offload_cfg(vport,
8669 vport->port_base_vlan_cfg.state,
8670 vlan_tag);
8671 if (ret)
8672 return ret;
8673 }
8674
8675 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8676}
8677
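/* The vport vlan list mirrors the vlans requested for a vport. The
 * hd_tbl_status field records whether an entry has actually been written to
 * the hardware filter table, so entries can be replayed after a reset or
 * when port based vlan is disabled again.
 */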
8678static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool written_to_tbl)
8680{
8681 struct hclge_vport_vlan_cfg *vlan;
8682
8683 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8684 if (!vlan)
8685 return;
8686
	vlan->hd_tbl_status = written_to_tbl;
8688 vlan->vlan_id = vlan_id;
8689
8690 list_add_tail(&vlan->node, &vport->vlan_list);
8691}
8692
8693static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8694{
8695 struct hclge_vport_vlan_cfg *vlan, *tmp;
8696 struct hclge_dev *hdev = vport->back;
8697 int ret;
8698
8699 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8700 if (!vlan->hd_tbl_status) {
8701 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8702 vport->vport_id,
8703 vlan->vlan_id, false);
8704 if (ret) {
8705 dev_err(&hdev->pdev->dev,
8706 "restore vport vlan list failed, ret=%d\n",
8707 ret);
8708 return ret;
8709 }
8710 }
8711 vlan->hd_tbl_status = true;
8712 }
8713
8714 return 0;
8715}
8716
8717static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8718 bool is_write_tbl)
8719{
8720 struct hclge_vport_vlan_cfg *vlan, *tmp;
8721 struct hclge_dev *hdev = vport->back;
8722
8723 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8724 if (vlan->vlan_id == vlan_id) {
8725 if (is_write_tbl && vlan->hd_tbl_status)
8726 hclge_set_vlan_filter_hw(hdev,
8727 htons(ETH_P_8021Q),
8728 vport->vport_id,
8729 vlan_id,
8730 true);
8731
8732 list_del(&vlan->node);
8733 kfree(vlan);
8734 break;
8735 }
8736 }
8737}
8738
8739void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8740{
8741 struct hclge_vport_vlan_cfg *vlan, *tmp;
8742 struct hclge_dev *hdev = vport->back;
8743
8744 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8745 if (vlan->hd_tbl_status)
8746 hclge_set_vlan_filter_hw(hdev,
8747 htons(ETH_P_8021Q),
8748 vport->vport_id,
8749 vlan->vlan_id,
8750 true);
8751
8752 vlan->hd_tbl_status = false;
8753 if (is_del_list) {
8754 list_del(&vlan->node);
8755 kfree(vlan);
8756 }
8757 }
8758 clear_bit(vport->vport_id, hdev->vf_vlan_full);
8759}
8760
8761void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8762{
8763 struct hclge_vport_vlan_cfg *vlan, *tmp;
8764 struct hclge_vport *vport;
8765 int i;
8766
8767 for (i = 0; i < hdev->num_alloc_vport; i++) {
8768 vport = &hdev->vport[i];
8769 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8770 list_del(&vlan->node);
8771 kfree(vlan);
8772 }
8773 }
8774}
8775
8776void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8777{
8778 struct hclge_vport_vlan_cfg *vlan, *tmp;
8779 struct hclge_dev *hdev = vport->back;
8780 u16 vlan_proto;
8781 u16 vlan_id;
8782 u16 state;
8783 int ret;
8784
8785 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8786 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8787 state = vport->port_base_vlan_cfg.state;
8788
8789 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8790 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8791 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8792 vport->vport_id, vlan_id,
8793 false);
8794 return;
8795 }
8796
8797 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8798 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8799 vport->vport_id,
8800 vlan->vlan_id, false);
8801 if (ret)
8802 break;
8803 vlan->hd_tbl_status = true;
8804 }
8805}
8806
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after the reset completes. The
 * entries with state TO_DEL no longer exist in hardware after such a
 * reset, so just delete these nodes from the mac list.
 */
8813static void hclge_mac_node_convert_for_reset(struct list_head *list)
8814{
8815 struct hclge_mac_node *mac_node, *tmp;
8816
8817 list_for_each_entry_safe(mac_node, tmp, list, node) {
8818 if (mac_node->state == HCLGE_MAC_ACTIVE) {
8819 mac_node->state = HCLGE_MAC_TO_ADD;
8820 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8821 list_del(&mac_node->node);
8822 kfree(mac_node);
8823 }
8824 }
8825}
8826
8827void hclge_restore_mac_table_common(struct hclge_vport *vport)
8828{
8829 spin_lock_bh(&vport->mac_list_lock);
8830
8831 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8832 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8833 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8834
8835 spin_unlock_bh(&vport->mac_list_lock);
8836}
8837
8838static void hclge_restore_hw_table(struct hclge_dev *hdev)
8839{
8840 struct hclge_vport *vport = &hdev->vport[0];
8841 struct hnae3_handle *handle = &vport->nic;
8842
8843 hclge_restore_mac_table_common(vport);
8844 hclge_restore_vport_vlan_table(vport);
8845 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8846
8847 hclge_restore_fd_entries(handle);
8848}
8849
8850int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8851{
8852 struct hclge_vport *vport = hclge_get_vport(handle);
8853
8854 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8855 vport->rxvlan_cfg.strip_tag1_en = false;
8856 vport->rxvlan_cfg.strip_tag2_en = enable;
8857 } else {
8858 vport->rxvlan_cfg.strip_tag1_en = enable;
8859 vport->rxvlan_cfg.strip_tag2_en = true;
8860 }
8861 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8862 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8863 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8864
8865 return hclge_set_vlan_rx_offload_cfg(vport);
8866}
8867
8868static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8869 u16 port_base_vlan_state,
8870 struct hclge_vlan_info *new_info,
8871 struct hclge_vlan_info *old_info)
8872{
8873 struct hclge_dev *hdev = vport->back;
8874 int ret;
8875
8876 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8877 hclge_rm_vport_all_vlan_table(vport, false);
8878 return hclge_set_vlan_filter_hw(hdev,
8879 htons(new_info->vlan_proto),
8880 vport->vport_id,
8881 new_info->vlan_tag,
8882 false);
8883 }
8884
8885 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8886 vport->vport_id, old_info->vlan_tag,
8887 true);
8888 if (ret)
8889 return ret;
8890
8891 return hclge_add_vport_all_vlan_table(vport);
8892}
8893
8894int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8895 struct hclge_vlan_info *vlan_info)
8896{
8897 struct hnae3_handle *nic = &vport->nic;
8898 struct hclge_vlan_info *old_vlan_info;
8899 struct hclge_dev *hdev = vport->back;
8900 int ret;
8901
8902 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8903
8904 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8905 if (ret)
8906 return ret;
8907
8908 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
		/* add new VLAN tag */
8910 ret = hclge_set_vlan_filter_hw(hdev,
8911 htons(vlan_info->vlan_proto),
8912 vport->vport_id,
8913 vlan_info->vlan_tag,
8914 false);
8915 if (ret)
8916 return ret;
8917
		/* remove old VLAN tag */
8919 ret = hclge_set_vlan_filter_hw(hdev,
8920 htons(old_vlan_info->vlan_proto),
8921 vport->vport_id,
8922 old_vlan_info->vlan_tag,
8923 true);
8924 if (ret)
8925 return ret;
8926
8927 goto update;
8928 }
8929
8930 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8931 old_vlan_info);
8932 if (ret)
8933 return ret;
8934
	/* update state only when disable/enable port based VLAN */
8936 vport->port_base_vlan_cfg.state = state;
8937 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8938 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8939 else
8940 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8941
8942update:
8943 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8944 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8945 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8946
8947 return 0;
8948}
8949
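/* Decide how a new port based vlan request changes the current state:
 * setting a non-zero tag enables it, writing tag 0 disables it, re-writing
 * the current tag is a no-op, and any other tag is a modify.
 */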
8950static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8951 enum hnae3_port_base_vlan_state state,
8952 u16 vlan)
8953{
8954 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8955 if (!vlan)
8956 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8957 else
8958 return HNAE3_PORT_BASE_VLAN_ENABLE;
8959 } else {
8960 if (!vlan)
8961 return HNAE3_PORT_BASE_VLAN_DISABLE;
8962 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8963 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8964 else
8965 return HNAE3_PORT_BASE_VLAN_MODIFY;
8966 }
8967}
8968
8969static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8970 u16 vlan, u8 qos, __be16 proto)
8971{
8972 struct hclge_vport *vport = hclge_get_vport(handle);
8973 struct hclge_dev *hdev = vport->back;
8974 struct hclge_vlan_info vlan_info;
8975 u16 state;
8976 int ret;
8977
8978 if (hdev->pdev->revision == 0x20)
8979 return -EOPNOTSUPP;
8980
8981 vport = hclge_get_vf_vport(hdev, vfid);
8982 if (!vport)
8983 return -EINVAL;
8984
	/* qos is a 3 bits value, so can not be bigger than 7 */
8986 if (vlan > VLAN_N_VID - 1 || qos > 7)
8987 return -EINVAL;
8988 if (proto != htons(ETH_P_8021Q))
8989 return -EPROTONOSUPPORT;
8990
8991 state = hclge_get_port_base_vlan_state(vport,
8992 vport->port_base_vlan_cfg.state,
8993 vlan);
8994 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8995 return 0;
8996
8997 vlan_info.vlan_tag = vlan;
8998 vlan_info.qos = qos;
8999 vlan_info.vlan_proto = ntohs(proto);
9000
9001 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9002 return hclge_update_port_base_vlan_cfg(vport, state,
9003 &vlan_info);
9004 } else {
9005 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9006 vport->vport_id, state,
9007 vlan, qos,
9008 ntohs(proto));
9009 return ret;
9010 }
9011}
9012
9013static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9014{
9015 struct hclge_vlan_info *vlan_info;
9016 struct hclge_vport *vport;
9017 int ret;
9018 int vf;
9019
	/* clear port base vlan for all vfs */
9021 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9022 vport = &hdev->vport[vf];
9023 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9024
9025 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9026 vport->vport_id,
9027 vlan_info->vlan_tag, true);
9028 if (ret)
9029 dev_err(&hdev->pdev->dev,
9030 "failed to clear vf vlan for vf%d, ret = %d\n",
9031 vf - HCLGE_VF_VPORT_START_NUM, ret);
9032 }
9033}
9034
9035int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9036 u16 vlan_id, bool is_kill)
9037{
9038 struct hclge_vport *vport = hclge_get_vport(handle);
9039 struct hclge_dev *hdev = vport->back;
	bool written_to_tbl = false;
9041 int ret = 0;
9042
	/* When the device is resetting or reset failed, firmware is unable
	 * to handle the mailbox. Just record the vlan id, and remove it
	 * after the reset finishes.
	 */
9047 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9048 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9049 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9050 return -EBUSY;
9051 }
9052
	/* when port base vlan is enabled, we use the port base vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing one;
	 * we only update the vport vlan list. The vlan ids in the list are
	 * written to the vlan filter table once port base vlan is disabled.
	 */
9059 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9060 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9061 vlan_id, is_kill);
		written_to_tbl = true;
9063 }
9064
9065 if (!ret) {
9066 if (is_kill)
9067 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9068 else
9069 hclge_add_vport_vlan_table(vport, vlan_id,
						   written_to_tbl);
9071 } else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id, and try to remove it from hw later, to keep consistent
		 * with the stack
		 */
9076 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9077 }
9078 return ret;
9079}
9080
9081static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9082{
9083#define HCLGE_MAX_SYNC_COUNT 60
9084
9085 int i, ret, sync_cnt = 0;
9086 u16 vlan_id;
9087
	/* retry removing the vlan ids that failed to be killed before */
9089 for (i = 0; i < hdev->num_alloc_vport; i++) {
9090 struct hclge_vport *vport = &hdev->vport[i];
9091
9092 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9093 VLAN_N_VID);
9094 while (vlan_id != VLAN_N_VID) {
9095 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9096 vport->vport_id, vlan_id,
9097 true);
9098 if (ret && ret != -EINVAL)
9099 return;
9100
9101 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9102 hclge_rm_vport_vlan_table(vport, vlan_id, false);
9103
9104 sync_cnt++;
9105 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9106 return;
9107
9108 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9109 VLAN_N_VID);
9110 }
9111 }
9112}
9113
9114static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9115{
9116 struct hclge_config_max_frm_size_cmd *req;
9117 struct hclge_desc desc;
9118
9119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9120
9121 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9122 req->max_frm_size = cpu_to_le16(new_mps);
9123 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9124
9125 return hclge_cmd_send(&hdev->hw, &desc, 1);
9126}
9127
9128static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9129{
9130 struct hclge_vport *vport = hclge_get_vport(handle);
9131
9132 return hclge_set_vport_mtu(vport, new_mtu);
9133}
9134
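/* Convert the requested MTU to a max frame size (MTU plus ethernet header,
 * FCS and two VLAN tags) and apply it. Only vport 0 (the PF) reprograms the
 * hardware; a VF vport just records its mps, which may not exceed the PF's.
 */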
9135int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9136{
9137 struct hclge_dev *hdev = vport->back;
9138 int i, max_frm_size, ret;
9139
	/* HW supports 2 layers of vlan tags */
9141 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9142 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9143 max_frm_size > HCLGE_MAC_MAX_FRAME)
9144 return -EINVAL;
9145
9146 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9147 mutex_lock(&hdev->vport_lock);
9148
9149 if (vport->vport_id && max_frm_size > hdev->mps) {
9150 mutex_unlock(&hdev->vport_lock);
9151 return -EINVAL;
9152 } else if (vport->vport_id) {
9153 vport->mps = max_frm_size;
9154 mutex_unlock(&hdev->vport_lock);
9155 return 0;
9156 }
9157
	/* PF's mps must be greater than or equal to VF's mps */
9159 for (i = 1; i < hdev->num_alloc_vport; i++)
9160 if (max_frm_size < hdev->vport[i].mps) {
9161 mutex_unlock(&hdev->vport_lock);
9162 return -EINVAL;
9163 }
9164
9165 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9166
9167 ret = hclge_set_mac_mtu(hdev, max_frm_size);
9168 if (ret) {
9169 dev_err(&hdev->pdev->dev,
9170 "Change mtu fail, ret =%d\n", ret);
9171 goto out;
9172 }
9173
9174 hdev->mps = max_frm_size;
9175 vport->mps = max_frm_size;
9176
9177 ret = hclge_buffer_alloc(hdev);
9178 if (ret)
9179 dev_err(&hdev->pdev->dev,
9180 "Allocate buffer fail, ret =%d\n", ret);
9181
9182out:
9183 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9184 mutex_unlock(&hdev->vport_lock);
9185 return ret;
9186}
9187
9188static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9189 bool enable)
9190{
9191 struct hclge_reset_tqp_queue_cmd *req;
9192 struct hclge_desc desc;
9193 int ret;
9194
9195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9196
9197 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9198 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9199 if (enable)
9200 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9201
9202 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9203 if (ret) {
9204 dev_err(&hdev->pdev->dev,
9205 "Send tqp reset cmd error, status =%d\n", ret);
9206 return ret;
9207 }
9208
9209 return 0;
9210}
9211
9212static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9213{
9214 struct hclge_reset_tqp_queue_cmd *req;
9215 struct hclge_desc desc;
9216 int ret;
9217
9218 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9219
9220 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9221 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9222
9223 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9224 if (ret) {
9225 dev_err(&hdev->pdev->dev,
9226 "Get reset status error, status =%d\n", ret);
9227 return ret;
9228 }
9229
9230 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9231}
9232
9233u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9234{
9235 struct hnae3_queue *queue;
9236 struct hclge_tqp *tqp;
9237
9238 queue = handle->kinfo.tqp[queue_id];
9239 tqp = container_of(queue, struct hclge_tqp, q);
9240
9241 return tqp->index;
9242}
9243
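/* Reset a single TQP: disable the queue, assert the reset through firmware,
 * poll until hardware reports it is ready to reset (up to
 * HCLGE_TQP_RESET_TRY_TIMES tries), then deassert the reset.
 */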
9244int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9245{
9246 struct hclge_vport *vport = hclge_get_vport(handle);
9247 struct hclge_dev *hdev = vport->back;
9248 int reset_try_times = 0;
9249 int reset_status;
9250 u16 queue_gid;
9251 int ret;
9252
9253 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9254
9255 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9256 if (ret) {
9257 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9258 return ret;
9259 }
9260
9261 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9262 if (ret) {
9263 dev_err(&hdev->pdev->dev,
9264 "Send reset tqp cmd fail, ret = %d\n", ret);
9265 return ret;
9266 }
9267
9268 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9269 reset_status = hclge_get_reset_status(hdev, queue_gid);
9270 if (reset_status)
9271 break;
9272
		/* Wait for tqp hw reset */
9274 usleep_range(1000, 1200);
9275 }
9276
9277 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP failed\n");
		/* the last command succeeded, so ret is 0 here; return a
		 * proper error code for the timeout instead of success
		 */
		return -ETIME;
9280 }
9281
9282 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9283 if (ret)
9284 dev_err(&hdev->pdev->dev,
9285 "Deassert the soft reset fail, ret = %d\n", ret);
9286
9287 return ret;
9288}
9289
9290void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9291{
9292 struct hclge_dev *hdev = vport->back;
9293 int reset_try_times = 0;
9294 int reset_status;
9295 u16 queue_gid;
9296 int ret;
9297
9298 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9299
9300 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9301 if (ret) {
9302 dev_warn(&hdev->pdev->dev,
9303 "Send reset tqp cmd fail, ret = %d\n", ret);
9304 return;
9305 }
9306
9307 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9308 reset_status = hclge_get_reset_status(hdev, queue_gid);
9309 if (reset_status)
9310 break;
9311
		/* Wait for tqp hw reset */
9313 usleep_range(1000, 1200);
9314 }
9315
9316 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP failed\n");
9318 return;
9319 }
9320
9321 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9322 if (ret)
9323 dev_warn(&hdev->pdev->dev,
9324 "Deassert the soft reset fail, ret = %d\n", ret);
9325}
9326
9327static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9328{
9329 struct hclge_vport *vport = hclge_get_vport(handle);
9330 struct hclge_dev *hdev = vport->back;
9331
9332 return hdev->fw_version;
9333}
9334
9335static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9336{
9337 struct phy_device *phydev = hdev->hw.mac.phydev;
9338
9339 if (!phydev)
9340 return;
9341
9342 phy_set_asym_pause(phydev, rx_en, tx_en);
9343}
9344
9345static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9346{
9347 int ret;
9348
9349 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9350 return 0;
9351
9352 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9353 if (ret)
9354 dev_err(&hdev->pdev->dev,
9355 "configure pauseparam error, ret = %d.\n", ret);
9356
9357 return ret;
9358}
9359
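/* Resolve the pause configuration after autoneg by combining the local and
 * link partner pause advertisements, then program the MAC accordingly.
 * Pause is disabled when the link is at half duplex.
 */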
9360int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9361{
9362 struct phy_device *phydev = hdev->hw.mac.phydev;
9363 u16 remote_advertising = 0;
9364 u16 local_advertising;
9365 u32 rx_pause, tx_pause;
9366 u8 flowctl;
9367
9368 if (!phydev->link || !phydev->autoneg)
9369 return 0;
9370
9371 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9372
9373 if (phydev->pause)
9374 remote_advertising = LPA_PAUSE_CAP;
9375
9376 if (phydev->asym_pause)
9377 remote_advertising |= LPA_PAUSE_ASYM;
9378
9379 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9380 remote_advertising);
9381 tx_pause = flowctl & FLOW_CTRL_TX;
9382 rx_pause = flowctl & FLOW_CTRL_RX;
9383
9384 if (phydev->duplex == HCLGE_MAC_HALF) {
9385 tx_pause = 0;
9386 rx_pause = 0;
9387 }
9388
9389 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9390}
9391
9392static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9393 u32 *rx_en, u32 *tx_en)
9394{
9395 struct hclge_vport *vport = hclge_get_vport(handle);
9396 struct hclge_dev *hdev = vport->back;
9397 struct phy_device *phydev = hdev->hw.mac.phydev;
9398
9399 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9400
9401 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9402 *rx_en = 0;
9403 *tx_en = 0;
9404 return;
9405 }
9406
9407 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9408 *rx_en = 1;
9409 *tx_en = 0;
9410 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9411 *tx_en = 1;
9412 *rx_en = 0;
9413 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9414 *rx_en = 1;
9415 *tx_en = 1;
9416 } else {
9417 *rx_en = 0;
9418 *tx_en = 0;
9419 }
9420}
9421
9422static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9423 u32 rx_en, u32 tx_en)
9424{
9425 if (rx_en && tx_en)
9426 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9427 else if (rx_en && !tx_en)
9428 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9429 else if (!rx_en && tx_en)
9430 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9431 else
9432 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9433
9434 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9435}
9436
9437static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9438 u32 rx_en, u32 tx_en)
9439{
9440 struct hclge_vport *vport = hclge_get_vport(handle);
9441 struct hclge_dev *hdev = vport->back;
9442 struct phy_device *phydev = hdev->hw.mac.phydev;
9443 u32 fc_autoneg;
9444
9445 if (phydev) {
9446 fc_autoneg = hclge_get_autoneg(handle);
9447 if (auto_neg != fc_autoneg) {
9448 dev_info(&hdev->pdev->dev,
9449 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9450 return -EOPNOTSUPP;
9451 }
9452 }
9453
9454 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9455 dev_info(&hdev->pdev->dev,
9456 "Priority flow control enabled. Cannot set link flow control.\n");
9457 return -EOPNOTSUPP;
9458 }
9459
9460 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9461
9462 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9463
9464 if (!auto_neg)
9465 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9466
9467 if (phydev)
9468 return phy_start_aneg(phydev);
9469
9470 return -EOPNOTSUPP;
9471}
9472
9473static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9474 u8 *auto_neg, u32 *speed, u8 *duplex)
9475{
9476 struct hclge_vport *vport = hclge_get_vport(handle);
9477 struct hclge_dev *hdev = vport->back;
9478
9479 if (speed)
9480 *speed = hdev->hw.mac.speed;
9481 if (duplex)
9482 *duplex = hdev->hw.mac.duplex;
9483 if (auto_neg)
9484 *auto_neg = hdev->hw.mac.autoneg;
9485}
9486
9487static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9488 u8 *module_type)
9489{
9490 struct hclge_vport *vport = hclge_get_vport(handle);
9491 struct hclge_dev *hdev = vport->back;
9492
	/* When the nic is down, the service task is not running and does not
	 * update the port information periodically. Query the port info here
	 * to make sure the media type returned is up to date.
	 */
9497 hclge_update_port_info(hdev);
9498
9499 if (media_type)
9500 *media_type = hdev->hw.mac.media_type;
9501
9502 if (module_type)
9503 *module_type = hdev->hw.mac.module_type;
9504}
9505
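/* Read the MDI/MDI-X control and status from the PHY: switch to the MDIX
 * register page, read the control and status registers, then restore the
 * copper page before decoding the result.
 */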
9506static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9507 u8 *tp_mdix_ctrl, u8 *tp_mdix)
9508{
9509 struct hclge_vport *vport = hclge_get_vport(handle);
9510 struct hclge_dev *hdev = vport->back;
9511 struct phy_device *phydev = hdev->hw.mac.phydev;
9512 int mdix_ctrl, mdix, is_resolved;
9513 unsigned int retval;
9514
9515 if (!phydev) {
9516 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9517 *tp_mdix = ETH_TP_MDI_INVALID;
9518 return;
9519 }
9520
9521 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9522
9523 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9524 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9525 HCLGE_PHY_MDIX_CTRL_S);
9526
9527 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9528 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9529 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9530
9531 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9532
9533 switch (mdix_ctrl) {
9534 case 0x0:
9535 *tp_mdix_ctrl = ETH_TP_MDI;
9536 break;
9537 case 0x1:
9538 *tp_mdix_ctrl = ETH_TP_MDI_X;
9539 break;
9540 case 0x3:
9541 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9542 break;
9543 default:
9544 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9545 break;
9546 }
9547
9548 if (!is_resolved)
9549 *tp_mdix = ETH_TP_MDI_INVALID;
9550 else if (mdix)
9551 *tp_mdix = ETH_TP_MDI_X;
9552 else
9553 *tp_mdix = ETH_TP_MDI;
9554}
9555
9556static void hclge_info_show(struct hclge_dev *hdev)
9557{
9558 struct device *dev = &hdev->pdev->dev;
9559
9560 dev_info(dev, "PF info begin:\n");
9561
	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of VMDq vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9568 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9569 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9570 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9571 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9572 dev_info(dev, "This is %s PF\n",
9573 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9574 dev_info(dev, "DCB %s\n",
9575 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9576 dev_info(dev, "MQPRIO %s\n",
9577 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9578
9579 dev_info(dev, "PF info end.\n");
9580}
9581
9582static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9583 struct hclge_vport *vport)
9584{
9585 struct hnae3_client *client = vport->nic.client;
9586 struct hclge_dev *hdev = ae_dev->priv;
9587 int rst_cnt = hdev->rst_stats.reset_cnt;
9588 int ret;
9589
9590 ret = client->ops->init_instance(&vport->nic);
9591 if (ret)
9592 return ret;
9593
9594 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9595 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9596 rst_cnt != hdev->rst_stats.reset_cnt) {
9597 ret = -EBUSY;
9598 goto init_nic_err;
9599 }
9600
	/* Enable nic hw error interrupts */
9602 ret = hclge_config_nic_hw_error(hdev, true);
9603 if (ret) {
9604 dev_err(&ae_dev->pdev->dev,
9605 "fail(%d) to enable hw error interrupts\n", ret);
9606 goto init_nic_err;
9607 }
9608
9609 hnae3_set_client_init_flag(client, ae_dev, 1);
9610
9611 if (netif_msg_drv(&hdev->vport->nic))
9612 hclge_info_show(hdev);
9613
9614 return ret;
9615
9616init_nic_err:
9617 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9618 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9619 msleep(HCLGE_WAIT_RESET_DONE);
9620
9621 client->ops->uninit_instance(&vport->nic, 0);
9622
9623 return ret;
9624}
9625
9626static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9627 struct hclge_vport *vport)
9628{
9629 struct hclge_dev *hdev = ae_dev->priv;
9630 struct hnae3_client *client;
9631 int rst_cnt;
9632 int ret;
9633
9634 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9635 !hdev->nic_client)
9636 return 0;
9637
9638 client = hdev->roce_client;
9639 ret = hclge_init_roce_base_info(vport);
9640 if (ret)
9641 return ret;
9642
9643 rst_cnt = hdev->rst_stats.reset_cnt;
9644 ret = client->ops->init_instance(&vport->roce);
9645 if (ret)
9646 return ret;
9647
9648 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9649 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9650 rst_cnt != hdev->rst_stats.reset_cnt) {
9651 ret = -EBUSY;
9652 goto init_roce_err;
9653 }
9654
	/* Enable roce ras interrupts */
9656 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9657 if (ret) {
9658 dev_err(&ae_dev->pdev->dev,
9659 "fail(%d) to enable roce ras interrupts\n", ret);
9660 goto init_roce_err;
9661 }
9662
9663 hnae3_set_client_init_flag(client, ae_dev, 1);
9664
9665 return 0;
9666
9667init_roce_err:
9668 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9669 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9670 msleep(HCLGE_WAIT_RESET_DONE);
9671
9672 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9673
9674 return ret;
9675}
9676
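/* Bind a client (NIC or RoCE) to every vport. The RoCE client instance is
 * only initialized once both the NIC and RoCE clients are registered and
 * the device actually supports RoCE.
 */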
9677static int hclge_init_client_instance(struct hnae3_client *client,
9678 struct hnae3_ae_dev *ae_dev)
9679{
9680 struct hclge_dev *hdev = ae_dev->priv;
9681 struct hclge_vport *vport;
9682 int i, ret;
9683
9684 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9685 vport = &hdev->vport[i];
9686
9687 switch (client->type) {
9688 case HNAE3_CLIENT_KNIC:
9689 hdev->nic_client = client;
9690 vport->nic.client = client;
9691 ret = hclge_init_nic_client_instance(ae_dev, vport);
9692 if (ret)
9693 goto clear_nic;
9694
9695 ret = hclge_init_roce_client_instance(ae_dev, vport);
9696 if (ret)
9697 goto clear_roce;
9698
9699 break;
9700 case HNAE3_CLIENT_ROCE:
9701 if (hnae3_dev_roce_supported(hdev)) {
9702 hdev->roce_client = client;
9703 vport->roce.client = client;
9704 }
9705
9706 ret = hclge_init_roce_client_instance(ae_dev, vport);
9707 if (ret)
9708 goto clear_roce;
9709
9710 break;
9711 default:
9712 return -EINVAL;
9713 }
9714 }
9715
9716 return 0;
9717
9718clear_nic:
9719 hdev->nic_client = NULL;
9720 vport->nic.client = NULL;
9721 return ret;
9722clear_roce:
9723 hdev->roce_client = NULL;
9724 vport->roce.client = NULL;
9725 return ret;
9726}
9727
9728static void hclge_uninit_client_instance(struct hnae3_client *client,
9729 struct hnae3_ae_dev *ae_dev)
9730{
9731 struct hclge_dev *hdev = ae_dev->priv;
9732 struct hclge_vport *vport;
9733 int i;
9734
9735 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9736 vport = &hdev->vport[i];
9737 if (hdev->roce_client) {
9738 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9739 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9740 msleep(HCLGE_WAIT_RESET_DONE);
9741
9742 hdev->roce_client->ops->uninit_instance(&vport->roce,
9743 0);
9744 hdev->roce_client = NULL;
9745 vport->roce.client = NULL;
9746 }
9747 if (client->type == HNAE3_CLIENT_ROCE)
9748 return;
9749 if (hdev->nic_client && client->ops->uninit_instance) {
9750 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9751 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9752 msleep(HCLGE_WAIT_RESET_DONE);
9753
9754 client->ops->uninit_instance(&vport->nic, 0);
9755 hdev->nic_client = NULL;
9756 vport->nic.client = NULL;
9757 }
9758 }
9759}
9760
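/* Enable the PCI device, prefer a 64-bit DMA mask with a 32-bit fallback,
 * request the regions and map BAR2 as the register base used for the
 * command queue and misc vector handling.
 */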
9761static int hclge_pci_init(struct hclge_dev *hdev)
9762{
9763 struct pci_dev *pdev = hdev->pdev;
9764 struct hclge_hw *hw;
9765 int ret;
9766
9767 ret = pci_enable_device(pdev);
9768 if (ret) {
9769 dev_err(&pdev->dev, "failed to enable PCI device\n");
9770 return ret;
9771 }
9772
9773 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9774 if (ret) {
9775 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9776 if (ret) {
9777 dev_err(&pdev->dev,
9778 "can't set consistent PCI DMA");
9779 goto err_disable_device;
9780 }
9781 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9782 }
9783
9784 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9785 if (ret) {
9786 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9787 goto err_disable_device;
9788 }
9789
9790 pci_set_master(pdev);
9791 hw = &hdev->hw;
9792 hw->io_base = pcim_iomap(pdev, 2, 0);
9793 if (!hw->io_base) {
9794 dev_err(&pdev->dev, "Can't map configuration register space\n");
9795 ret = -ENOMEM;
9796 goto err_clr_master;
9797 }
9798
9799 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9800
9801 return 0;
9802err_clr_master:
9803 pci_clear_master(pdev);
9804 pci_release_regions(pdev);
9805err_disable_device:
9806 pci_disable_device(pdev);
9807
9808 return ret;
9809}
9810
9811static void hclge_pci_uninit(struct hclge_dev *hdev)
9812{
9813 struct pci_dev *pdev = hdev->pdev;
9814
9815 pcim_iounmap(pdev, hdev->hw.io_base);
9816 pci_free_irq_vectors(pdev);
9817 pci_clear_master(pdev);
9818 pci_release_mem_regions(pdev);
9819 pci_disable_device(pdev);
9820}
9821
9822static void hclge_state_init(struct hclge_dev *hdev)
9823{
9824 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9825 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9826 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9827 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9828 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9829 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9830 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9831}
9832
9833static void hclge_state_uninit(struct hclge_dev *hdev)
9834{
9835 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9836 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9837
9838 if (hdev->reset_timer.function)
9839 del_timer_sync(&hdev->reset_timer);
9840 if (hdev->service_task.work.func)
9841 cancel_delayed_work_sync(&hdev->service_task);
9842}
9843
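/* Prepare for an FLR: take the reset semaphore and quiesce the device,
 * retrying up to HCLGE_FLR_RETRY_CNT times (or for as long as another reset
 * is pending) before disabling the misc vector and the command queue.
 */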
9844static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9845{
9846#define HCLGE_FLR_RETRY_WAIT_MS 500
9847#define HCLGE_FLR_RETRY_CNT 5
9848
9849 struct hclge_dev *hdev = ae_dev->priv;
9850 int retry_cnt = 0;
9851 int ret;
9852
9853retry:
9854 down(&hdev->reset_sem);
9855 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9856 hdev->reset_type = HNAE3_FLR_RESET;
9857 ret = hclge_reset_prepare(hdev);
9858 if (ret || hdev->reset_pending) {
9859 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9860 ret);
9861 if (hdev->reset_pending ||
9862 retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9863 dev_err(&hdev->pdev->dev,
9864 "reset_pending:0x%lx, retry_cnt:%d\n",
9865 hdev->reset_pending, retry_cnt);
9866 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9867 up(&hdev->reset_sem);
9868 msleep(HCLGE_FLR_RETRY_WAIT_MS);
9869 goto retry;
9870 }
9871 }
9872
	/* disable misc vector before FLR is done */
9874 hclge_enable_vector(&hdev->misc_vector, false);
9875 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9876 hdev->rst_stats.flr_rst_cnt++;
9877}
9878
9879static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9880{
9881 struct hclge_dev *hdev = ae_dev->priv;
9882 int ret;
9883
9884 hclge_enable_vector(&hdev->misc_vector, true);
9885
9886 ret = hclge_reset_rebuild(hdev);
9887 if (ret)
9888 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9889
9890 hdev->reset_type = HNAE3_NONE_RESET;
9891 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9892 up(&hdev->reset_sem);
9893}
9894
9895static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9896{
9897 u16 i;
9898
9899 for (i = 0; i < hdev->num_alloc_vport; i++) {
9900 struct hclge_vport *vport = &hdev->vport[i];
9901 int ret;
9902
		/* Send cmd to clear VF's FUNC_RST_ING */
9904 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9905 if (ret)
9906 dev_warn(&hdev->pdev->dev,
9907 "clear vf(%u) rst failed %d!\n",
9908 vport->vport_id, ret);
9909 }
9910}
9911
9912static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9913{
9914 struct pci_dev *pdev = ae_dev->pdev;
9915 struct hclge_dev *hdev;
9916 int ret;
9917
9918 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9919 if (!hdev)
9920 return -ENOMEM;
9921
9922 hdev->pdev = pdev;
9923 hdev->ae_dev = ae_dev;
9924 hdev->reset_type = HNAE3_NONE_RESET;
9925 hdev->reset_level = HNAE3_FUNC_RESET;
9926 ae_dev->priv = hdev;
9927
	/* the default mps covers a standard frame with two vlan tags */
9929 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9930
9931 mutex_init(&hdev->vport_lock);
9932 spin_lock_init(&hdev->fd_rule_lock);
9933 sema_init(&hdev->reset_sem, 1);
9934
9935 ret = hclge_pci_init(hdev);
9936 if (ret)
9937 goto out;
9938
	/* Firmware command queue initialize */
9940 ret = hclge_cmd_queue_init(hdev);
9941 if (ret)
9942 goto err_pci_uninit;
9943
	/* Firmware command initialize */
9945 ret = hclge_cmd_init(hdev);
9946 if (ret)
9947 goto err_cmd_uninit;
9948
9949 ret = hclge_get_cap(hdev);
9950 if (ret)
9951 goto err_cmd_uninit;
9952
9953 ret = hclge_configure(hdev);
9954 if (ret) {
9955 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9956 goto err_cmd_uninit;
9957 }
9958
9959 ret = hclge_init_msi(hdev);
9960 if (ret) {
9961 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9962 goto err_cmd_uninit;
9963 }
9964
9965 ret = hclge_misc_irq_init(hdev);
9966 if (ret)
9967 goto err_msi_uninit;
9968
9969 ret = hclge_alloc_tqps(hdev);
9970 if (ret) {
9971 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9972 goto err_msi_irq_uninit;
9973 }
9974
9975 ret = hclge_alloc_vport(hdev);
9976 if (ret)
9977 goto err_msi_irq_uninit;
9978
9979 ret = hclge_map_tqp(hdev);
9980 if (ret)
9981 goto err_msi_irq_uninit;
9982
9983 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9984 ret = hclge_mac_mdio_config(hdev);
9985 if (ret)
9986 goto err_msi_irq_uninit;
9987 }
9988
9989 ret = hclge_init_umv_space(hdev);
9990 if (ret)
9991 goto err_mdiobus_unreg;
9992
9993 ret = hclge_mac_init(hdev);
9994 if (ret) {
9995 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9996 goto err_mdiobus_unreg;
9997 }
9998
9999 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10000 if (ret) {
10001 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10002 goto err_mdiobus_unreg;
10003 }
10004
10005 ret = hclge_config_gro(hdev, true);
10006 if (ret)
10007 goto err_mdiobus_unreg;
10008
10009 ret = hclge_init_vlan_config(hdev);
10010 if (ret) {
10011 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10012 goto err_mdiobus_unreg;
10013 }
10014
10015 ret = hclge_tm_schd_init(hdev);
10016 if (ret) {
10017 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10018 goto err_mdiobus_unreg;
10019 }
10020
10021 hclge_rss_init_cfg(hdev);
10022 ret = hclge_rss_init_hw(hdev);
10023 if (ret) {
10024 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10025 goto err_mdiobus_unreg;
10026 }
10027
10028 ret = init_mgr_tbl(hdev);
10029 if (ret) {
10030 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10031 goto err_mdiobus_unreg;
10032 }
10033
10034 ret = hclge_init_fd_config(hdev);
10035 if (ret) {
10036 dev_err(&pdev->dev,
10037 "fd table init fail, ret=%d\n", ret);
10038 goto err_mdiobus_unreg;
10039 }
10040
10041 INIT_KFIFO(hdev->mac_tnl_log);
10042
10043 hclge_dcb_ops_set(hdev);
10044
10045 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10046 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10047
	/* Setup affinity after service timer setup because the affinity
	 * notifier works on the service task
	 */
10051 hclge_misc_affinity_setup(hdev);
10052
10053 hclge_clear_all_event_cause(hdev);
10054 hclge_clear_resetting_state(hdev);
10055
	/* Log and clear the hw errors that already occurred */
10057 hclge_handle_all_hns_hw_errors(ae_dev);
10058
	/* request a delayed reset for the error recovery, because an
	 * immediate global reset on this PF would affect the pending
	 * initialization of other PFs
	 */
10062 if (ae_dev->hw_err_reset_req) {
10063 enum hnae3_reset_type reset_level;
10064
10065 reset_level = hclge_get_reset_level(ae_dev,
10066 &ae_dev->hw_err_reset_req);
10067 hclge_set_def_reset_request(ae_dev, reset_level);
10068 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10069 }
10070
	/* Enable MISC vector(vector0) interrupt */
10072 hclge_enable_vector(&hdev->misc_vector, true);
10073
10074 hclge_state_init(hdev);
10075 hdev->last_reset_time = jiffies;
10076
10077 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10078 HCLGE_DRIVER_NAME);
10079
10080 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10081
10082 return 0;
10083
10084err_mdiobus_unreg:
10085 if (hdev->hw.mac.phydev)
10086 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10087err_msi_irq_uninit:
10088 hclge_misc_irq_uninit(hdev);
10089err_msi_uninit:
10090 pci_free_irq_vectors(pdev);
10091err_cmd_uninit:
10092 hclge_cmd_uninit(hdev);
10093err_pci_uninit:
10094 pcim_iounmap(pdev, hdev->hw.io_base);
10095 pci_clear_master(pdev);
10096 pci_release_regions(pdev);
10097 pci_disable_device(pdev);
10098out:
10099 mutex_destroy(&hdev->vport_lock);
10100 return ret;
10101}
10102
10103static void hclge_stats_clear(struct hclge_dev *hdev)
10104{
10105 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10106}
10107
10108static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10109{
10110 return hclge_config_switch_param(hdev, vf, enable,
10111 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10112}
10113
10114static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10115{
10116 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10117 HCLGE_FILTER_FE_NIC_INGRESS_B,
10118 enable, vf);
10119}
10120
10121static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10122{
10123 int ret;
10124
10125 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10126 if (ret) {
10127 dev_err(&hdev->pdev->dev,
10128 "Set vf %d mac spoof check %s failed, ret=%d\n",
10129 vf, enable ? "on" : "off", ret);
10130 return ret;
10131 }
10132
10133 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10134 if (ret)
10135 dev_err(&hdev->pdev->dev,
10136 "Set vf %d vlan spoof check %s failed, ret=%d\n",
10137 vf, enable ? "on" : "off", ret);
10138
10139 return ret;
10140}
10141
10142static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10143 bool enable)
10144{
10145 struct hclge_vport *vport = hclge_get_vport(handle);
10146 struct hclge_dev *hdev = vport->back;
10147 u32 new_spoofchk = enable ? 1 : 0;
10148 int ret;
10149
10150 if (hdev->pdev->revision == 0x20)
10151 return -EOPNOTSUPP;
10152
10153 vport = hclge_get_vf_vport(hdev, vf);
10154 if (!vport)
10155 return -EINVAL;
10156
10157 if (vport->vf_info.spoofchk == new_spoofchk)
10158 return 0;
10159
	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
			 vf);
10168
10169 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10170 if (ret)
10171 return ret;
10172
10173 vport->vf_info.spoofchk = new_spoofchk;
10174 return 0;
10175}
10176
10177static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10178{
10179 struct hclge_vport *vport = hdev->vport;
10180 int ret;
10181 int i;
10182
10183 if (hdev->pdev->revision == 0x20)
10184 return 0;
10185
	/* resume the vf spoof check state after reset */
10187 for (i = 0; i < hdev->num_alloc_vport; i++) {
10188 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10189 vport->vf_info.spoofchk);
10190 if (ret)
10191 return ret;
10192
10193 vport++;
10194 }
10195
10196 return 0;
10197}
10198
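/* when trust is revoked from a VF in promiscuous mode, promisc is forced
 * off (broadcast promisc is kept except on revision 0x20 hardware) and
 * the VF is notified of the change
 */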
10199static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10200{
10201 struct hclge_vport *vport = hclge_get_vport(handle);
10202 struct hclge_dev *hdev = vport->back;
10203 u32 new_trusted = enable ? 1 : 0;
10204 bool en_bc_pmc;
10205 int ret;
10206
10207 vport = hclge_get_vf_vport(hdev, vf);
10208 if (!vport)
10209 return -EINVAL;
10210
10211 if (vport->vf_info.trusted == new_trusted)
10212 return 0;
10213
	/* disable promisc mode for VF if it is not trusted any more */
10215 if (!enable && vport->vf_info.promisc_enable) {
10216 en_bc_pmc = hdev->pdev->revision != 0x20;
10217 ret = hclge_set_vport_promisc_mode(vport, false, false,
10218 en_bc_pmc);
10219 if (ret)
10220 return ret;
10221 vport->vf_info.promisc_enable = 0;
10222 hclge_inform_vf_promisc_info(vport);
10223 }
10224
10225 vport->vf_info.trusted = new_trusted;
10226
10227 return 0;
10228}
10229
10230static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10231{
10232 int ret;
10233 int vf;
10234
	/* reset vf rate to default value */
10236 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10237 struct hclge_vport *vport = &hdev->vport[vf];
10238
10239 vport->vf_info.max_tx_rate = 0;
10240 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10241 if (ret)
10242 dev_err(&hdev->pdev->dev,
				"vf%d failed to reset tx rate to default, ret=%d\n",
10244 vf - HCLGE_VF_VPORT_START_NUM, ret);
10245 }
10246}
10247
10248static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10249 int min_tx_rate, int max_tx_rate)
10250{
10251 if (min_tx_rate != 0 ||
10252 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10253 dev_err(&hdev->pdev->dev,
10254 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10255 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10256 return -EINVAL;
10257 }
10258
10259 return 0;
10260}
10261
10262static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10263 int min_tx_rate, int max_tx_rate, bool force)
10264{
10265 struct hclge_vport *vport = hclge_get_vport(handle);
10266 struct hclge_dev *hdev = vport->back;
10267 int ret;
10268
10269 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10270 if (ret)
10271 return ret;
10272
10273 vport = hclge_get_vf_vport(hdev, vf);
10274 if (!vport)
10275 return -EINVAL;
10276
10277 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10278 return 0;
10279
10280 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10281 if (ret)
10282 return ret;
10283
10284 vport->vf_info.max_tx_rate = max_tx_rate;
10285
10286 return 0;
10287}
10288
10289static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10290{
10291 struct hnae3_handle *handle = &hdev->vport->nic;
10292 struct hclge_vport *vport;
10293 int ret;
10294 int vf;
10295
	/* resume the vf max_tx_rate after reset */
10297 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10298 vport = hclge_get_vf_vport(hdev, vf);
10299 if (!vport)
10300 return -EINVAL;
10301
		/* zero means max rate, after reset the firmware has already
		 * set it to max rate, so just continue
		 */
10305 if (!vport->vf_info.max_tx_rate)
10306 continue;
10307
10308 ret = hclge_set_vf_rate(handle, vf, 0,
10309 vport->vf_info.max_tx_rate, true);
10310 if (ret) {
10311 dev_err(&hdev->pdev->dev,
10312 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10313 vf, vport->vf_info.max_tx_rate, ret);
10314 return ret;
10315 }
10316 }
10317
10318 return 0;
10319}
10320
10321static void hclge_reset_vport_state(struct hclge_dev *hdev)
10322{
10323 struct hclge_vport *vport = hdev->vport;
10324 int i;
10325
10326 for (i = 0; i < hdev->num_alloc_vport; i++) {
10327 hclge_vport_stop(vport);
10328 vport++;
10329 }
10330}
10331
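/* rebuild hardware state after a reset: command queue, TQP mapping, MAC,
 * VLAN, TM, RSS and flow director are re-initialized from the driver's
 * software state
 */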
10332static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10333{
10334 struct hclge_dev *hdev = ae_dev->priv;
10335 struct pci_dev *pdev = ae_dev->pdev;
10336 int ret;
10337
10338 set_bit(HCLGE_STATE_DOWN, &hdev->state);
10339
10340 hclge_stats_clear(hdev);
10341
	/* IMP or global reset clears the hardware MAC/VLAN tables, so clear
	 * the driver's shadow copies and mark all vports for table restore
	 */
10344 if (hdev->reset_type == HNAE3_IMP_RESET ||
10345 hdev->reset_type == HNAE3_GLOBAL_RESET) {
10346 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10347 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10348 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10349 hclge_reset_umv_space(hdev);
10350 }
10351
10352 ret = hclge_cmd_init(hdev);
10353 if (ret) {
10354 dev_err(&pdev->dev, "Cmd queue init failed\n");
10355 return ret;
10356 }
10357
10358 ret = hclge_map_tqp(hdev);
10359 if (ret) {
10360 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10361 return ret;
10362 }
10363
10364 ret = hclge_mac_init(hdev);
10365 if (ret) {
10366 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10367 return ret;
10368 }
10369
10370 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10371 if (ret) {
		dev_err(&pdev->dev, "Enable TSO fail, ret = %d\n", ret);
10373 return ret;
10374 }
10375
10376 ret = hclge_config_gro(hdev, true);
10377 if (ret)
10378 return ret;
10379
10380 ret = hclge_init_vlan_config(hdev);
10381 if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
10383 return ret;
10384 }
10385
10386 ret = hclge_tm_init_hw(hdev, true);
10387 if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
10389 return ret;
10390 }
10391
10392 ret = hclge_rss_init_hw(hdev);
10393 if (ret) {
		dev_err(&pdev->dev, "RSS init fail, ret = %d\n", ret);
10395 return ret;
10396 }
10397
10398 ret = init_mgr_tbl(hdev);
10399 if (ret) {
10400 dev_err(&pdev->dev,
10401 "failed to reinit manager table, ret = %d\n", ret);
10402 return ret;
10403 }
10404
10405 ret = hclge_init_fd_config(hdev);
10406 if (ret) {
10407 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10408 return ret;
10409 }
10410
	/* Log and clear the hw errors that have already occurred */
10412 hclge_handle_all_hns_hw_errors(ae_dev);
10413
	/* Re-enable the hw error interrupts because they are disabled by
	 * core/global reset
	 */
10417 ret = hclge_config_nic_hw_error(hdev, true);
10418 if (ret) {
10419 dev_err(&pdev->dev,
10420 "fail(%d) to re-enable NIC hw error interrupts\n",
10421 ret);
10422 return ret;
10423 }
10424
10425 if (hdev->roce_client) {
10426 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10427 if (ret) {
10428 dev_err(&pdev->dev,
10429 "fail(%d) to re-enable roce ras interrupts\n",
10430 ret);
10431 return ret;
10432 }
10433 }
10434
10435 hclge_reset_vport_state(hdev);
10436 ret = hclge_reset_vport_spoofchk(hdev);
10437 if (ret)
10438 return ret;
10439
10440 ret = hclge_resume_vf_rate(hdev);
10441 if (ret)
10442 return ret;
10443
10444 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10445 HCLGE_DRIVER_NAME);
10446
10447 return 0;
10448}
10449
10450static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10451{
10452 struct hclge_dev *hdev = ae_dev->priv;
10453 struct hclge_mac *mac = &hdev->hw.mac;
10454
10455 hclge_reset_vf_rate(hdev);
10456 hclge_clear_vf_vlan(hdev);
10457 hclge_misc_affinity_teardown(hdev);
10458 hclge_state_uninit(hdev);
10459 hclge_uninit_mac_table(hdev);
10460
10461 if (mac->phydev)
10462 mdiobus_unregister(mac->mdio_bus);
10463
	/* Disable MISC vector (vector0) */
10465 hclge_enable_vector(&hdev->misc_vector, false);
10466 synchronize_irq(hdev->misc_vector.vector_irq);
10467
	/* Disable all hw interrupts */
10469 hclge_config_mac_tnl_int(hdev, false);
10470 hclge_config_nic_hw_error(hdev, false);
10471 hclge_config_rocee_ras_interrupt(hdev, false);
10472
10473 hclge_cmd_uninit(hdev);
10474 hclge_misc_irq_uninit(hdev);
10475 hclge_pci_uninit(hdev);
10476 mutex_destroy(&hdev->vport_lock);
10477 hclge_uninit_vport_vlan_table(hdev);
10478 ae_dev->priv = NULL;
10479}
10480
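/* the maximum number of combined channels is bounded by the PF's RSS
 * size limit and by the TQPs available per TC on this vport
 */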
10481static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10482{
10483 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10484 struct hclge_vport *vport = hclge_get_vport(handle);
10485 struct hclge_dev *hdev = vport->back;
10486
10487 return min_t(u32, hdev->rss_size_max,
10488 vport->alloc_tqps / kinfo->num_tc);
10489}
10490
10491static void hclge_get_channels(struct hnae3_handle *handle,
10492 struct ethtool_channels *ch)
10493{
10494 ch->max_combined = hclge_get_max_channels(handle);
10495 ch->other_count = 1;
10496 ch->max_other = 1;
10497 ch->combined_count = handle->kinfo.rss_size;
10498}
10499
10500static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10501 u16 *alloc_tqps, u16 *max_rss_size)
10502{
10503 struct hclge_vport *vport = hclge_get_vport(handle);
10504 struct hclge_dev *hdev = vport->back;
10505
10506 *alloc_tqps = vport->alloc_tqps;
10507 *max_rss_size = hdev->rss_size_max;
10508}
10509
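/* change the number of combined channels: update the vport's TC mapping,
 * reprogram the RSS TC mode, and rebuild the RSS indirection table for
 * the new rss_size unless the user has configured a custom table
 */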
10510static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10511 bool rxfh_configured)
10512{
10513 struct hclge_vport *vport = hclge_get_vport(handle);
10514 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10515 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10516 struct hclge_dev *hdev = vport->back;
10517 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10518 u16 cur_rss_size = kinfo->rss_size;
10519 u16 cur_tqps = kinfo->num_tqps;
10520 u16 tc_valid[HCLGE_MAX_TC_NUM];
10521 u16 roundup_size;
10522 u32 *rss_indir;
10523 unsigned int i;
10524 int ret;
10525
10526 kinfo->req_rss_size = new_tqps_num;
10527
10528 ret = hclge_tm_vport_map_update(hdev);
10529 if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
10531 return ret;
10532 }
10533
10534 roundup_size = roundup_pow_of_two(kinfo->rss_size);
10535 roundup_size = ilog2(roundup_size);
10536
10537 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10538 tc_valid[i] = 0;
10539
10540 if (!(hdev->hw_tc_map & BIT(i)))
10541 continue;
10542
10543 tc_valid[i] = 1;
10544 tc_size[i] = roundup_size;
10545 tc_offset[i] = kinfo->rss_size * i;
10546 }
10547 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10548 if (ret)
10549 return ret;
10550
	/* RSS indirection table has been configured by user */
10552 if (rxfh_configured)
10553 goto out;
10554
	/* Reinitialize the RSS indirection table for the new rss_size */
10556 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10557 if (!rss_indir)
10558 return -ENOMEM;
10559
10560 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10561 rss_indir[i] = i % kinfo->rss_size;
10562
10563 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10564 if (ret)
10565 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10566 ret);
10567
10568 kfree(rss_indir);
10569
10570out:
10571 if (!ret)
10572 dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10574 cur_rss_size, kinfo->rss_size,
10575 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10576
10577 return ret;
10578}
10579
10580static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10581 u32 *regs_num_64_bit)
10582{
10583 struct hclge_desc desc;
10584 u32 total_num;
10585 int ret;
10586
10587 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10588 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10589 if (ret) {
10590 dev_err(&hdev->pdev->dev,
10591 "Query register number cmd failed, ret = %d.\n", ret);
10592 return ret;
10593 }
10594
10595 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10596 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10597
10598 total_num = *regs_num_32_bit + *regs_num_64_bit;
10599 if (!total_num)
10600 return -EINVAL;
10601
10602 return 0;
10603}
10604
10605static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10606 void *data)
10607{
10608#define HCLGE_32_BIT_REG_RTN_DATANUM 8
10609#define HCLGE_32_BIT_DESC_NODATA_LEN 2
10610
10611 struct hclge_desc *desc;
10612 u32 *reg_val = data;
10613 __le32 *desc_data;
10614 int nodata_num;
10615 int cmd_num;
10616 int i, k, n;
10617 int ret;
10618
10619 if (regs_num == 0)
10620 return 0;
10621
10622 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10623 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10624 HCLGE_32_BIT_REG_RTN_DATANUM);
10625 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10626 if (!desc)
10627 return -ENOMEM;
10628
10629 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10630 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10631 if (ret) {
10632 dev_err(&hdev->pdev->dev,
10633 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10634 kfree(desc);
10635 return ret;
10636 }
10637
10638 for (i = 0; i < cmd_num; i++) {
10639 if (i == 0) {
10640 desc_data = (__le32 *)(&desc[i].data[0]);
10641 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10642 } else {
10643 desc_data = (__le32 *)(&desc[i]);
10644 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10645 }
10646 for (k = 0; k < n; k++) {
10647 *reg_val++ = le32_to_cpu(*desc_data++);
10648
10649 regs_num--;
10650 if (!regs_num)
10651 break;
10652 }
10653 }
10654
10655 kfree(desc);
10656 return 0;
10657}
10658
10659static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10660 void *data)
10661{
10662#define HCLGE_64_BIT_REG_RTN_DATANUM 4
10663#define HCLGE_64_BIT_DESC_NODATA_LEN 1
10664
10665 struct hclge_desc *desc;
10666 u64 *reg_val = data;
10667 __le64 *desc_data;
10668 int nodata_len;
10669 int cmd_num;
10670 int i, k, n;
10671 int ret;
10672
10673 if (regs_num == 0)
10674 return 0;
10675
10676 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10677 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10678 HCLGE_64_BIT_REG_RTN_DATANUM);
10679 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10680 if (!desc)
10681 return -ENOMEM;
10682
10683 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10684 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10685 if (ret) {
10686 dev_err(&hdev->pdev->dev,
10687 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10688 kfree(desc);
10689 return ret;
10690 }
10691
10692 for (i = 0; i < cmd_num; i++) {
10693 if (i == 0) {
10694 desc_data = (__le64 *)(&desc[i].data[0]);
10695 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10696 } else {
10697 desc_data = (__le64 *)(&desc[i]);
10698 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10699 }
10700 for (k = 0; k < n; k++) {
10701 *reg_val++ = le64_to_cpu(*desc_data++);
10702
10703 regs_num--;
10704 if (!regs_num)
10705 break;
10706 }
10707 }
10708
10709 kfree(desc);
10710 return 0;
10711}
10712
10713#define MAX_SEPARATE_NUM 4
10714#define SEPARATOR_VALUE 0xFDFCFBFA
10715#define REG_NUM_PER_LINE 4
10716#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10717#define REG_SEPARATOR_LINE 1
10718#define REG_NUM_REMAIN_MASK 3
10719#define BD_LIST_MAX_NUM 30
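
/* every register section in the dump is padded with SEPARATOR_VALUE words
 * so that it ends on a REG_LEN_PER_LINE boundary, which lets the consumer
 * detect section boundaries
 */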
10720
10721int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10722{
10723 int i;
10724
	/* initialize the command BDs except the last one */
10726 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10727 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10728 true);
10729 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10730 }
10731
	/* initialize the last command BD */
10733 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10734
10735 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10736}
10737
10738static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10739 int *bd_num_list,
10740 u32 type_num)
10741{
10742 u32 entries_per_desc, desc_index, index, offset, i;
10743 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10744 int ret;
10745
10746 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10747 if (ret) {
10748 dev_err(&hdev->pdev->dev,
10749 "Get dfx bd num fail, status is %d.\n", ret);
10750 return ret;
10751 }
10752
10753 entries_per_desc = ARRAY_SIZE(desc[0].data);
10754 for (i = 0; i < type_num; i++) {
10755 offset = hclge_dfx_bd_offset_list[i];
10756 index = offset % entries_per_desc;
10757 desc_index = offset / entries_per_desc;
10758 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10759 }
10760
10761 return ret;
10762}
10763
10764static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10765 struct hclge_desc *desc_src, int bd_num,
10766 enum hclge_opcode_type cmd)
10767{
10768 struct hclge_desc *desc = desc_src;
10769 int i, ret;
10770
10771 hclge_cmd_setup_basic_desc(desc, cmd, true);
10772 for (i = 0; i < bd_num - 1; i++) {
10773 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10774 desc++;
10775 hclge_cmd_setup_basic_desc(desc, cmd, true);
10776 }
10777
10778 desc = desc_src;
10779 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10780 if (ret)
10781 dev_err(&hdev->pdev->dev,
10782 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10783 cmd, ret);
10784
10785 return ret;
10786}
10787
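/* unpack the register values returned in the descriptors into the dump
 * buffer and pad the section with separators; returns the number of u32
 * words written
 */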
10788static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10789 void *data)
10790{
10791 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10792 struct hclge_desc *desc = desc_src;
10793 u32 *reg = data;
10794
10795 entries_per_desc = ARRAY_SIZE(desc->data);
10796 reg_num = entries_per_desc * bd_num;
10797 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10798 for (i = 0; i < reg_num; i++) {
10799 index = i % entries_per_desc;
10800 desc_index = i / entries_per_desc;
10801 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10802 }
10803 for (i = 0; i < separator_num; i++)
10804 *reg++ = SEPARATOR_VALUE;
10805
10806 return reg_num + separator_num;
10807}
10808
10809static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10810{
10811 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10812 int data_len_per_desc, bd_num, i;
10813 int bd_num_list[BD_LIST_MAX_NUM];
10814 u32 data_len;
10815 int ret;
10816
10817 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10818 if (ret) {
10819 dev_err(&hdev->pdev->dev,
10820 "Get dfx reg bd num fail, status is %d.\n", ret);
10821 return ret;
10822 }
10823
10824 data_len_per_desc = sizeof_field(struct hclge_desc, data);
10825 *len = 0;
10826 for (i = 0; i < dfx_reg_type_num; i++) {
10827 bd_num = bd_num_list[i];
10828 data_len = data_len_per_desc * bd_num;
10829 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10830 }
10831
10832 return ret;
10833}
10834
10835static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10836{
10837 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10838 int bd_num, bd_num_max, buf_len, i;
10839 int bd_num_list[BD_LIST_MAX_NUM];
10840 struct hclge_desc *desc_src;
10841 u32 *reg = data;
10842 int ret;
10843
10844 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10845 if (ret) {
10846 dev_err(&hdev->pdev->dev,
10847 "Get dfx reg bd num fail, status is %d.\n", ret);
10848 return ret;
10849 }
10850
10851 bd_num_max = bd_num_list[0];
10852 for (i = 1; i < dfx_reg_type_num; i++)
10853 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10854
10855 buf_len = sizeof(*desc_src) * bd_num_max;
10856 desc_src = kzalloc(buf_len, GFP_KERNEL);
10857 if (!desc_src)
10858 return -ENOMEM;
10859
10860 for (i = 0; i < dfx_reg_type_num; i++) {
10861 bd_num = bd_num_list[i];
10862 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10863 hclge_dfx_reg_opcode_list[i]);
10864 if (ret) {
10865 dev_err(&hdev->pdev->dev,
10866 "Get dfx reg fail, status is %d.\n", ret);
10867 break;
10868 }
10869
10870 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10871 }
10872
10873 kfree(desc_src);
10874 return ret;
10875}
10876
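/* copy the directly readable register sections (cmdq, common, per-ring
 * and per-vector interrupt registers) into the dump buffer; returns the
 * number of u32 words written, including separator padding
 */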
10877static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10878 struct hnae3_knic_private_info *kinfo)
10879{
10880#define HCLGE_RING_REG_OFFSET 0x200
10881#define HCLGE_RING_INT_REG_OFFSET 0x4
10882
10883 int i, j, reg_num, separator_num;
10884 int data_num_sum;
10885 u32 *reg = data;
10886
	/* fetch the per-PF register values for the ethtool register dump */
10888 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10889 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10890 for (i = 0; i < reg_num; i++)
10891 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10892 for (i = 0; i < separator_num; i++)
10893 *reg++ = SEPARATOR_VALUE;
10894 data_num_sum = reg_num + separator_num;
10895
10896 reg_num = ARRAY_SIZE(common_reg_addr_list);
10897 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10898 for (i = 0; i < reg_num; i++)
10899 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10900 for (i = 0; i < separator_num; i++)
10901 *reg++ = SEPARATOR_VALUE;
10902 data_num_sum += reg_num + separator_num;
10903
10904 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10905 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10906 for (j = 0; j < kinfo->num_tqps; j++) {
10907 for (i = 0; i < reg_num; i++)
10908 *reg++ = hclge_read_dev(&hdev->hw,
10909 ring_reg_addr_list[i] +
10910 HCLGE_RING_REG_OFFSET * j);
10911 for (i = 0; i < separator_num; i++)
10912 *reg++ = SEPARATOR_VALUE;
10913 }
10914 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10915
10916 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10917 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10918 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10919 for (i = 0; i < reg_num; i++)
10920 *reg++ = hclge_read_dev(&hdev->hw,
10921 tqp_intr_reg_addr_list[i] +
10922 HCLGE_RING_INT_REG_OFFSET * j);
10923 for (i = 0; i < separator_num; i++)
10924 *reg++ = SEPARATOR_VALUE;
10925 }
10926 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10927
10928 return data_num_sum;
10929}
10930
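/* total register dump length in bytes: each section is accounted in whole
 * REG_LEN_PER_LINE lines plus a separator line, with the DFX registers
 * appended at the end
 */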
10931static int hclge_get_regs_len(struct hnae3_handle *handle)
10932{
10933 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10934 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10935 struct hclge_vport *vport = hclge_get_vport(handle);
10936 struct hclge_dev *hdev = vport->back;
10937 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10938 int regs_lines_32_bit, regs_lines_64_bit;
10939 int ret;
10940
	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10942 if (ret) {
10943 dev_err(&hdev->pdev->dev,
10944 "Get register number failed, ret = %d.\n", ret);
10945 return ret;
10946 }
10947
10948 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10949 if (ret) {
10950 dev_err(&hdev->pdev->dev,
10951 "Get dfx reg len failed, ret = %d.\n", ret);
10952 return ret;
10953 }
10954
10955 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10956 REG_SEPARATOR_LINE;
10957 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10958 REG_SEPARATOR_LINE;
10959 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10960 REG_SEPARATOR_LINE;
10961 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10962 REG_SEPARATOR_LINE;
10963 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10964 REG_SEPARATOR_LINE;
10965 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10966 REG_SEPARATOR_LINE;
10967
10968 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10969 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10970 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10971}
10972
10973static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10974 void *data)
10975{
10976 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10977 struct hclge_vport *vport = hclge_get_vport(handle);
10978 struct hclge_dev *hdev = vport->back;
10979 u32 regs_num_32_bit, regs_num_64_bit;
10980 int i, reg_num, separator_num, ret;
10981 u32 *reg = data;
10982
10983 *version = hdev->fw_version;
10984
	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10986 if (ret) {
10987 dev_err(&hdev->pdev->dev,
10988 "Get register number failed, ret = %d.\n", ret);
10989 return;
10990 }
10991
10992 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10993
10994 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10995 if (ret) {
10996 dev_err(&hdev->pdev->dev,
10997 "Get 32 bit register failed, ret = %d.\n", ret);
10998 return;
10999 }
11000 reg_num = regs_num_32_bit;
11001 reg += reg_num;
11002 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11003 for (i = 0; i < separator_num; i++)
11004 *reg++ = SEPARATOR_VALUE;
11005
11006 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11007 if (ret) {
11008 dev_err(&hdev->pdev->dev,
11009 "Get 64 bit register failed, ret = %d.\n", ret);
11010 return;
11011 }
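	/* each 64 bit register occupies two u32 slots in the output buffer */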
11012 reg_num = regs_num_64_bit * 2;
11013 reg += reg_num;
11014 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11015 for (i = 0; i < separator_num; i++)
11016 *reg++ = SEPARATOR_VALUE;
11017
11018 ret = hclge_get_dfx_reg(hdev, reg);
11019 if (ret)
11020 dev_err(&hdev->pdev->dev,
11021 "Get dfx register failed, ret = %d.\n", ret);
11022}
11023
11024static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11025{
11026 struct hclge_set_led_state_cmd *req;
11027 struct hclge_desc desc;
11028 int ret;
11029
11030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11031
11032 req = (struct hclge_set_led_state_cmd *)desc.data;
11033 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11034 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11035
11036 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11037 if (ret)
11038 dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);
11040
11041 return ret;
11042}
11043
11044enum hclge_led_status {
11045 HCLGE_LED_OFF,
11046 HCLGE_LED_ON,
11047 HCLGE_LED_NO_CHANGE = 0xFF,
11048};
11049
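/* ethtool -p support: only steady LED on/off is implemented; per-blink
 * control states return -EINVAL
 */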
11050static int hclge_set_led_id(struct hnae3_handle *handle,
11051 enum ethtool_phys_id_state status)
11052{
11053 struct hclge_vport *vport = hclge_get_vport(handle);
11054 struct hclge_dev *hdev = vport->back;
11055
11056 switch (status) {
11057 case ETHTOOL_ID_ACTIVE:
11058 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11059 case ETHTOOL_ID_INACTIVE:
11060 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11061 default:
11062 return -EINVAL;
11063 }
11064}
11065
11066static void hclge_get_link_mode(struct hnae3_handle *handle,
11067 unsigned long *supported,
11068 unsigned long *advertising)
11069{
11070 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11071 struct hclge_vport *vport = hclge_get_vport(handle);
11072 struct hclge_dev *hdev = vport->back;
11073 unsigned int idx = 0;
11074
11075 for (; idx < size; idx++) {
11076 supported[idx] = hdev->hw.mac.supported[idx];
11077 advertising[idx] = hdev->hw.mac.advertising[idx];
11078 }
11079}
11080
11081static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11082{
11083 struct hclge_vport *vport = hclge_get_vport(handle);
11084 struct hclge_dev *hdev = vport->back;
11085
11086 return hclge_config_gro(hdev, enable);
11087}
11088
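/* re-apply promiscuous mode and VLAN filter state to hardware when the
 * promisc configuration has changed, including changes driven by the
 * overflow promisc flags
 */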
11089static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11090{
11091 struct hclge_vport *vport = &hdev->vport[0];
11092 struct hnae3_handle *handle = &vport->nic;
11093 u8 tmp_flags = 0;
11094 int ret;
11095
11096 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11097 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11098 vport->last_promisc_flags = vport->overflow_promisc_flags;
11099 }
11100
11101 if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11102 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11103 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11104 tmp_flags & HNAE3_MPE);
11105 if (!ret) {
11106 clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11107 hclge_enable_vlan_filter(handle,
11108 tmp_flags & HNAE3_VLAN_FLTR);
11109 }
11110 }
11111}
11112
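/* query firmware whether an SFP module is currently plugged in */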
11113static bool hclge_module_existed(struct hclge_dev *hdev)
11114{
11115 struct hclge_desc desc;
11116 u32 existed;
11117 int ret;
11118
11119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11120 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11121 if (ret) {
11122 dev_err(&hdev->pdev->dev,
11123 "failed to get SFP exist state, ret = %d\n", ret);
11124 return false;
11125 }
11126
11127 existed = le32_to_cpu(desc.data[0]);
11128
11129 return existed != 0;
11130}
11131
/* read as much SFP EEPROM info as one command (HCLGE_SFP_INFO_CMD_NUM BDs)
 * can carry; returns the number of bytes actually read
 */
11135static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11136 u32 len, u8 *data)
11137{
11138 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11139 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11140 u16 read_len;
11141 u16 copy_len;
11142 int ret;
11143 int i;
11144
	/* setup all SFP BDs */
11146 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11147 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11148 true);
11149
		/* all BDs except the last one need the next flag */
11151 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11152 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11153 }
11154
	/* setup BD0, which carries the read offset and length */
11156 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11157 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11158 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11159 sfp_info_bd0->read_len = cpu_to_le16(read_len);
11160
11161 ret = hclge_cmd_send(&hdev->hw, desc, i);
11162 if (ret) {
11163 dev_err(&hdev->pdev->dev,
11164 "failed to get SFP eeprom info, ret = %d\n", ret);
11165 return 0;
11166 }
11167
	/* copy the SFP info in BD0 to the output buffer */
11169 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11170 memcpy(data, sfp_info_bd0->data, copy_len);
11171 read_len = copy_len;
11172
	/* copy the SFP info from BD1 onwards to the output buffer */
11174 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11175 if (read_len >= len)
11176 return read_len;
11177
11178 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11179 memcpy(data + read_len, desc[i].data, copy_len);
11180 read_len += copy_len;
11181 }
11182
11183 return read_len;
11184}
11185
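/* read the module EEPROM in chunks, since firmware returns a bounded
 * number of bytes per command; loop until 'len' bytes have been copied
 */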
11186static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11187 u32 len, u8 *data)
11188{
11189 struct hclge_vport *vport = hclge_get_vport(handle);
11190 struct hclge_dev *hdev = vport->back;
11191 u32 read_len = 0;
11192 u16 data_len;
11193
11194 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11195 return -EOPNOTSUPP;
11196
11197 if (!hclge_module_existed(hdev))
11198 return -ENXIO;
11199
11200 while (read_len < len) {
11201 data_len = hclge_get_sfp_eeprom_info(hdev,
11202 offset + read_len,
11203 len - read_len,
11204 data + read_len);
11205 if (!data_len)
11206 return -EIO;
11207
11208 read_len += data_len;
11209 }
11210
11211 return 0;
11212}
11213
11214static const struct hnae3_ae_ops hclge_ops = {
11215 .init_ae_dev = hclge_init_ae_dev,
11216 .uninit_ae_dev = hclge_uninit_ae_dev,
11217 .flr_prepare = hclge_flr_prepare,
11218 .flr_done = hclge_flr_done,
11219 .init_client_instance = hclge_init_client_instance,
11220 .uninit_client_instance = hclge_uninit_client_instance,
11221 .map_ring_to_vector = hclge_map_ring_to_vector,
11222 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11223 .get_vector = hclge_get_vector,
11224 .put_vector = hclge_put_vector,
11225 .set_promisc_mode = hclge_set_promisc_mode,
11226 .request_update_promisc_mode = hclge_request_update_promisc_mode,
11227 .set_loopback = hclge_set_loopback,
11228 .start = hclge_ae_start,
11229 .stop = hclge_ae_stop,
11230 .client_start = hclge_client_start,
11231 .client_stop = hclge_client_stop,
11232 .get_status = hclge_get_status,
11233 .get_ksettings_an_result = hclge_get_ksettings_an_result,
11234 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11235 .get_media_type = hclge_get_media_type,
11236 .check_port_speed = hclge_check_port_speed,
11237 .get_fec = hclge_get_fec,
11238 .set_fec = hclge_set_fec,
11239 .get_rss_key_size = hclge_get_rss_key_size,
11240 .get_rss_indir_size = hclge_get_rss_indir_size,
11241 .get_rss = hclge_get_rss,
11242 .set_rss = hclge_set_rss,
11243 .set_rss_tuple = hclge_set_rss_tuple,
11244 .get_rss_tuple = hclge_get_rss_tuple,
11245 .get_tc_size = hclge_get_tc_size,
11246 .get_mac_addr = hclge_get_mac_addr,
11247 .set_mac_addr = hclge_set_mac_addr,
11248 .do_ioctl = hclge_do_ioctl,
11249 .add_uc_addr = hclge_add_uc_addr,
11250 .rm_uc_addr = hclge_rm_uc_addr,
11251 .add_mc_addr = hclge_add_mc_addr,
11252 .rm_mc_addr = hclge_rm_mc_addr,
11253 .set_autoneg = hclge_set_autoneg,
11254 .get_autoneg = hclge_get_autoneg,
11255 .restart_autoneg = hclge_restart_autoneg,
11256 .halt_autoneg = hclge_halt_autoneg,
11257 .get_pauseparam = hclge_get_pauseparam,
11258 .set_pauseparam = hclge_set_pauseparam,
11259 .set_mtu = hclge_set_mtu,
11260 .reset_queue = hclge_reset_tqp,
11261 .get_stats = hclge_get_stats,
11262 .get_mac_stats = hclge_get_mac_stat,
11263 .update_stats = hclge_update_stats,
11264 .get_strings = hclge_get_strings,
11265 .get_sset_count = hclge_get_sset_count,
11266 .get_fw_version = hclge_get_fw_version,
11267 .get_mdix_mode = hclge_get_mdix_mode,
11268 .enable_vlan_filter = hclge_enable_vlan_filter,
11269 .set_vlan_filter = hclge_set_vlan_filter,
11270 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11271 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11272 .reset_event = hclge_reset_event,
11273 .get_reset_level = hclge_get_reset_level,
11274 .set_default_reset_request = hclge_set_def_reset_request,
11275 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11276 .set_channels = hclge_set_channels,
11277 .get_channels = hclge_get_channels,
11278 .get_regs_len = hclge_get_regs_len,
11279 .get_regs = hclge_get_regs,
11280 .set_led_id = hclge_set_led_id,
11281 .get_link_mode = hclge_get_link_mode,
11282 .add_fd_entry = hclge_add_fd_entry,
11283 .del_fd_entry = hclge_del_fd_entry,
11284 .del_all_fd_entries = hclge_del_all_fd_entries,
11285 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11286 .get_fd_rule_info = hclge_get_fd_rule_info,
11287 .get_fd_all_rules = hclge_get_all_rules,
11288 .enable_fd = hclge_enable_fd,
11289 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11290 .dbg_run_cmd = hclge_dbg_run_cmd,
11291 .handle_hw_ras_error = hclge_handle_hw_ras_error,
11292 .get_hw_reset_stat = hclge_get_hw_reset_stat,
11293 .ae_dev_resetting = hclge_ae_dev_resetting,
11294 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11295 .set_gro_en = hclge_gro_en,
11296 .get_global_queue_id = hclge_covert_handle_qid_global,
11297 .set_timer_task = hclge_set_timer_task,
11298 .mac_connect_phy = hclge_mac_connect_phy,
11299 .mac_disconnect_phy = hclge_mac_disconnect_phy,
11300 .get_vf_config = hclge_get_vf_config,
11301 .set_vf_link_state = hclge_set_vf_link_state,
11302 .set_vf_spoofchk = hclge_set_vf_spoofchk,
11303 .set_vf_trust = hclge_set_vf_trust,
11304 .set_vf_rate = hclge_set_vf_rate,
11305 .set_vf_mac = hclge_set_vf_mac,
11306 .get_module_eeprom = hclge_get_module_eeprom,
11307 .get_cmdq_stat = hclge_get_cmdq_stat,
11308};
11309
11310static struct hnae3_ae_algo ae_algo = {
11311 .ops = &hclge_ops,
11312 .pdev_id_table = ae_algo_pci_tbl,
11313};
11314
11315static int hclge_init(void)
11316{
11317 pr_info("%s is initializing\n", HCLGE_NAME);
11318
11319 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11320 if (!hclge_wq) {
11321 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11322 return -ENOMEM;
11323 }
11324
11325 hnae3_register_ae_algo(&ae_algo);
11326
11327 return 0;
11328}
11329
11330static void hclge_exit(void)
11331{
11332 hnae3_unregister_ae_algo(&ae_algo);
11333 destroy_workqueue(hclge_wq);
11334}
11335module_init(hclge_init);
11336module_exit(hclge_exit);
11337
11338MODULE_LICENSE("GPL");
11339MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11340MODULE_DESCRIPTION("HCLGE Driver");
11341MODULE_VERSION(HCLGE_MOD_VERSION);
11342