// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT		256U
#define HCLGE_BUF_MUL_BY		2
#define HCLGE_BUF_DIV_BY		2
#define NEED_RESERVE_TC_NUM		2
#define BUF_MAX_PERCENT			100
#define BUF_RESERVE_PERCENT		90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS		10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_CMDQ_INTR_STS_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_PF_OTHER_INT_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

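/* MAC statistics: each entry maps an ethtool string to the offset of its
 * counter in struct hclge_mac_stats. The stats_num field holds the number
 * of statistics registers (the V1 or V2 maximum) the firmware must report
 * for the counter to be exposed.
 */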
157static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
159 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
163 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
164 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
166 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
167 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
168 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
169 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
170 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
171 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
172 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
173 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
174 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
176 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
178 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
180 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
182 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
184 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
185 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
186 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
187 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
188 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
189 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
190 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
191 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
192 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
193 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
194 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
195 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
196 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
198 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
200 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
202 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
204 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
205 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
206 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
207 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
208 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
209 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
210 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
211 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
212 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
213 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
214 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
215 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
216 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
217 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
218 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
219 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
220 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
221 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
222 {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
223 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
224 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
225 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
226 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
228 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
229 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
230 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
231 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
232 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
234 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
235 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
236 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
238 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
240 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
242 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
244 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
246 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
248 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
250 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
251 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
252 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
253 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
254 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
255 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
256 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
257 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
258 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
259 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
260 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
261 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
262 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
263 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
264 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
266 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
268 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
270 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
272 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
274 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
276 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
278 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
280 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
282 {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
284 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
286 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
288 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
289 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
290 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
291 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
292 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
293 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
294 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
295 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
296 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
298 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
300 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
302 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
303 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
304 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
305 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
306 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
308 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
310 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
312 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
314 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
316 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
318 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
320 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
322 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
324 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
326 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
328 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
330 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
332 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
334 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
336 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
338
339 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
340 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
341 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
342 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
343 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
344 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
345 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
346 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
347 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
348 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
349 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
350 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
351 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
352 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
353 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
354 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
355 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
356 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
357 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
358 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
359 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
360 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
361 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
362 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
363};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

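/* width in bits of each meta data field within the flow director key */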
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

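/* flow director tuple fields: key width in bits, key option, and the
 * offsets of the tuple value and mask within struct hclge_fd_rule
 * (-1 when the field has no corresponding rule member)
 */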
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it forwards the
 * descriptors to the common command queue implementation.
 */
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* the first desc has a 64-bit header, so one u64 must be subtracted */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* the statistics data is continuous in memory: only the
		 * first desc carries a 64-bit header, the remaining descs
		 * are filled with raw counter data
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* allocate enough descs to hold reg_num registers plus the header */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* this may be called inside atomic sections, so GFP_ATOMIC is
	 * more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* the statistics data is continuous in memory: only the
		 * first desc carries a 64-bit header, the remaining descs
		 * are filled with raw counter data
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* The driver needs the total number of registers, both valid and
	 * reserved ones, but the old firmware on V2 devices only returns
	 * the number of valid registers. To stay compatible with such
	 * devices, the driver uses a fixed value for them.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* the firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}

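/* count the entries in @strs whose required statistics register number
 * is covered by the firmware reported mac_stats_num
 */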
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
				   HNAE3_SUPPORT_PHY_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
				   HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* check whether pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and RoCE vectors, and the
		 * NIC vectors are queued before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

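/* map each MAC speed to its corresponding HCLGE_SUPPORT_*_BIT ability bit */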
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

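/* the hclge_convert_setting_*() helpers below translate the firmware speed
 * ability bits into ethtool link modes for SR, LR, CR and KR media types
 */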
static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			link_mode);
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

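/* parse the two configuration descriptors read from flash into @cfg */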
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);

	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
	 * power of 2, instead of reading it out directly. This leaves
	 * room for future changes and expansions. When the VF max rss
	 * size is zero, the device does not support VF, so the PF max
	 * rss size is used as the VF max rss size.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the
	 * configuration file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes,
	 * so a conversion is needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be read out
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is sent to hardware in units of 4 bytes */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

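/* cache the number of valid mac statistics registers; old firmware that
 * does not support the query (-EOPNOTSUPP) leaves the number as zero
 */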
static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* devices whose version is below V3 do not support querying dev
	 * specs, so the default specifications are used for them
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* use the minimum number of tqps and descriptors */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* a device without DCB support can only have one TC and no PFC */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* currently, non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	const struct cpumask *cpumask = cpu_online_mask;
	struct hclge_cfg cfg;
	int node, ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	/* Set the affinity based on numa node */
	node = dev_to_node(&hdev->pdev->dev);
	if (node != NUMA_NO_NODE)
		cpumask = cpumask_of_node(node);

	cpumask_copy(&hdev->affinity_mask, cpumask);

	return ret;
}

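/* tell the firmware the minimum and maximum TSO MSS the driver will use */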
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when the device supports tx push mode and has device
		 * memory, the queue can execute push mode or doorbell
		 * mode on the device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}

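/* map physical queue @tqp_pid to queue @tqp_vid of function @func_id; the
 * HCLGE_TQP_MAP_TYPE_B flag marks the queue as a VF queue rather than a
 * PF queue
 */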
1694static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1695 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1696{
1697 struct hclge_tqp_map_cmd *req;
1698 struct hclge_desc desc;
1699 int ret;
1700
1701 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1702
1703 req = (struct hclge_tqp_map_cmd *)desc.data;
1704 req->tqp_id = cpu_to_le16(tqp_pid);
1705 req->tqp_vf = func_id;
1706 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1707 if (!is_pf)
1708 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1709 req->tqp_vid = cpu_to_le16(tqp_vid);
1710
1711 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1712 if (ret)
1713 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1714
1715 return ret;
1716}
1717
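/* assign up to num_tqps unused TQPs to the vport and derive its rss_size */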
1718static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1719{
1720 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1721 struct hclge_dev *hdev = vport->back;
1722 int i, alloced;
1723
1724 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1725 alloced < num_tqps; i++) {
1726 if (!hdev->htqp[i].alloced) {
1727 hdev->htqp[i].q.handle = &vport->nic;
1728 hdev->htqp[i].q.tqp_index = alloced;
1729 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1730 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1731 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1732 hdev->htqp[i].alloced = true;
1733 alloced++;
1734 }
1735 }
1736 vport->alloc_tqps = alloced;
1737 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1738 vport->alloc_tqps / hdev->tm_info.num_tc);
1739
	/* ensure one to one mapping between irq and queue at default */
1741 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1742 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1743
1744 return 0;
1745}
1746
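/* set up the knic private info of the vport's nic handle and assign TQPs */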
1747static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1748 u16 num_tx_desc, u16 num_rx_desc)
1750{
1751 struct hnae3_handle *nic = &vport->nic;
1752 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1753 struct hclge_dev *hdev = vport->back;
1754 int ret;
1755
1756 kinfo->num_tx_desc = num_tx_desc;
1757 kinfo->num_rx_desc = num_rx_desc;
1758
1759 kinfo->rx_buf_len = hdev->rx_buf_len;
1760 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1761
1762 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1763 sizeof(struct hnae3_queue *), GFP_KERNEL);
1764 if (!kinfo->tqp)
1765 return -ENOMEM;
1766
1767 ret = hclge_assign_tqp(vport, num_tqps);
1768 if (ret)
		dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1770
1771 return ret;
1772}
1773
1774static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1775 struct hclge_vport *vport)
1776{
1777 struct hnae3_handle *nic = &vport->nic;
1778 struct hnae3_knic_private_info *kinfo;
1779 u16 i;
1780
1781 kinfo = &nic->kinfo;
1782 for (i = 0; i < vport->alloc_tqps; i++) {
1783 struct hclge_comm_tqp *q =
1784 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
1785 bool is_pf;
1786 int ret;
1787
1788 is_pf = !(vport->vport_id);
1789 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1790 i, is_pf);
1791 if (ret)
1792 return ret;
1793 }
1794
1795 return 0;
1796}
1797
1798static int hclge_map_tqp(struct hclge_dev *hdev)
1799{
1800 struct hclge_vport *vport = hdev->vport;
1801 u16 i, num_vport;
1802
1803 num_vport = hdev->num_req_vfs + 1;
1804 for (i = 0; i < num_vport; i++) {
1805 int ret;
1806
1807 ret = hclge_map_tqp_to_vport(hdev, vport);
1808 if (ret)
1809 return ret;
1810
1811 vport++;
1812 }
1813
1814 return 0;
1815}
1816
1817static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1818{
1819 struct hnae3_handle *nic = &vport->nic;
1820 struct hclge_dev *hdev = vport->back;
1821 int ret;
1822
1823 nic->pdev = hdev->pdev;
1824 nic->ae_algo = &ae_algo;
1825 nic->numa_node_mask = hdev->numa_node_mask;
1826 nic->kinfo.io_base = hdev->hw.hw.io_base;
1827
1828 ret = hclge_knic_setup(vport, num_tqps,
1829 hdev->num_tx_desc, hdev->num_rx_desc);
1830 if (ret)
1831 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1832
1833 return ret;
1834}
1835
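/* allocate one vport for the PF itself plus one per requested VF and
 * distribute the TQPs among them
 */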
1836static int hclge_alloc_vport(struct hclge_dev *hdev)
1837{
1838 struct pci_dev *pdev = hdev->pdev;
1839 struct hclge_vport *vport;
1840 u32 tqp_main_vport;
1841 u32 tqp_per_vport;
1842 int num_vport, i;
1843 int ret;
1844
	/* We need to alloc a vport for main NIC of PF */
1846 num_vport = hdev->num_req_vfs + 1;
1847
1848 if (hdev->num_tqps < num_vport) {
1849 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1850 hdev->num_tqps, num_vport);
1851 return -EINVAL;
1852 }
1853
	/* Alloc the same number of TQPs for every vport */
1855 tqp_per_vport = hdev->num_tqps / num_vport;
1856 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1857
1858 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1859 GFP_KERNEL);
1860 if (!vport)
1861 return -ENOMEM;
1862
1863 hdev->vport = vport;
1864 hdev->num_alloc_vport = num_vport;
1865
1866 if (IS_ENABLED(CONFIG_PCI_IOV))
1867 hdev->num_alloc_vfs = hdev->num_req_vfs;
1868
1869 for (i = 0; i < num_vport; i++) {
1870 vport->back = hdev;
1871 vport->vport_id = i;
1872 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1873 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1874 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1875 vport->port_base_vlan_cfg.tbl_sta = true;
1876 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1877 vport->req_vlan_fltr_en = true;
1878 INIT_LIST_HEAD(&vport->vlan_list);
1879 INIT_LIST_HEAD(&vport->uc_mac_list);
1880 INIT_LIST_HEAD(&vport->mc_mac_list);
1881 spin_lock_init(&vport->mac_list_lock);
1882
1883 if (i == 0)
1884 ret = hclge_vport_setup(vport, tqp_main_vport);
1885 else
1886 ret = hclge_vport_setup(vport, tqp_per_vport);
1887 if (ret) {
1888 dev_err(&pdev->dev,
1889 "vport setup failed for vport %d, %d\n",
1890 i, ret);
1891 return ret;
1892 }
1893
1894 vport++;
1895 }
1896
1897 return 0;
1898}
1899
1900static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1901 struct hclge_pkt_buf_alloc *buf_alloc)
1902{
/* TX buffer size is in units of 128 bytes */
1904#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1905#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1906 struct hclge_tx_buff_alloc_cmd *req;
1907 struct hclge_desc desc;
1908 int ret;
1909 u8 i;
1910
1911 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1912
1913 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1914 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1915 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1916
1917 req->tx_pkt_buff[i] =
1918 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1919 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1920 }
1921
1922 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1923 if (ret)
1924 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1925 ret);
1926
1927 return ret;
1928}
1929
1930static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1931 struct hclge_pkt_buf_alloc *buf_alloc)
1932{
1933 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1934
1935 if (ret)
1936 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1937
1938 return ret;
1939}
1940
1941static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1942{
1943 unsigned int i;
1944 u32 cnt = 0;
1945
1946 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1947 if (hdev->hw_tc_map & BIT(i))
1948 cnt++;
1949 return cnt;
1950}
1951
/* Get the number of pfc enabled TCs, which have private buffer */
1953static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1954 struct hclge_pkt_buf_alloc *buf_alloc)
1955{
1956 struct hclge_priv_buf *priv;
1957 unsigned int i;
1958 int cnt = 0;
1959
1960 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1961 priv = &buf_alloc->priv_buf[i];
1962 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1963 priv->enable)
1964 cnt++;
1965 }
1966
1967 return cnt;
1968}
1969
/* Get the number of pfc disabled TCs, which have private buffer */
1971static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1972 struct hclge_pkt_buf_alloc *buf_alloc)
1973{
1974 struct hclge_priv_buf *priv;
1975 unsigned int i;
1976 int cnt = 0;
1977
1978 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1979 priv = &buf_alloc->priv_buf[i];
1980 if (hdev->hw_tc_map & BIT(i) &&
1981 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1982 priv->enable)
1983 cnt++;
1984 }
1985
1986 return cnt;
1987}
1988
1989static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1990{
1991 struct hclge_priv_buf *priv;
1992 u32 rx_priv = 0;
1993 int i;
1994
1995 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1996 priv = &buf_alloc->priv_buf[i];
1997 if (priv->enable)
1998 rx_priv += priv->buf_size;
1999 }
2000 return rx_priv;
2001}
2002
2003static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2004{
2005 u32 i, total_tx_size = 0;
2006
2007 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2008 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2009
2010 return total_tx_size;
2011}
2012
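/* check whether rx_all can hold the private buffers plus the required
 * shared buffer, and set up the shared buffer waterlines and per TC
 * thresholds if it can
 */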
2013static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2014 struct hclge_pkt_buf_alloc *buf_alloc,
2015 u32 rx_all)
2016{
2017 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2018 u32 tc_num = hclge_get_tc_num(hdev);
2019 u32 shared_buf, aligned_mps;
2020 u32 rx_priv;
2021 int i;
2022
2023 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2024
2025 if (hnae3_dev_dcb_supported(hdev))
2026 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2027 hdev->dv_buf_size;
2028 else
2029 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2030 + hdev->dv_buf_size;
2031
2032 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2033 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2034 HCLGE_BUF_SIZE_UNIT);
2035
2036 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2037 if (rx_all < rx_priv + shared_std)
2038 return false;
2039
2040 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2041 buf_alloc->s_buf.buf_size = shared_buf;
2042 if (hnae3_dev_dcb_supported(hdev)) {
2043 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2044 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2045 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2046 HCLGE_BUF_SIZE_UNIT);
2047 } else {
2048 buf_alloc->s_buf.self.high = aligned_mps +
2049 HCLGE_NON_DCB_ADDITIONAL_BUF;
2050 buf_alloc->s_buf.self.low = aligned_mps;
2051 }
2052
2053 if (hnae3_dev_dcb_supported(hdev)) {
2054 hi_thrd = shared_buf - hdev->dv_buf_size;
2055
2056 if (tc_num <= NEED_RESERVE_TC_NUM)
2057 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2058 / BUF_MAX_PERCENT;
2059
2060 if (tc_num)
2061 hi_thrd = hi_thrd / tc_num;
2062
2063 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2064 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2065 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2066 } else {
2067 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2068 lo_thrd = aligned_mps;
2069 }
2070
2071 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2072 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2073 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2074 }
2075
2076 return true;
2077}
2078
2079static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2080 struct hclge_pkt_buf_alloc *buf_alloc)
2081{
2082 u32 i, total_size;
2083
2084 total_size = hdev->pkt_buf_size;
2085
	/* alloc tx buffer for all enabled tc */
2087 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2088 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089
2090 if (hdev->hw_tc_map & BIT(i)) {
2091 if (total_size < hdev->tx_buf_size)
2092 return -ENOMEM;
2093
2094 priv->tx_buf_size = hdev->tx_buf_size;
2095 } else {
2096 priv->tx_buf_size = 0;
2097 }
2098
2099 total_size -= priv->tx_buf_size;
2100 }
2101
2102 return 0;
2103}
2104
2105static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2106 struct hclge_pkt_buf_alloc *buf_alloc)
2107{
2108 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2109 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2110 unsigned int i;
2111
2112 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2113 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2114
2115 priv->enable = 0;
2116 priv->wl.low = 0;
2117 priv->wl.high = 0;
2118 priv->buf_size = 0;
2119
2120 if (!(hdev->hw_tc_map & BIT(i)))
2121 continue;
2122
2123 priv->enable = 1;
2124
2125 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2126 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2127 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2128 HCLGE_BUF_SIZE_UNIT);
2129 } else {
2130 priv->wl.low = 0;
2131 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2132 aligned_mps;
2133 }
2134
2135 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2136 }
2137
2138 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2139}
2140
2141static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2142 struct hclge_pkt_buf_alloc *buf_alloc)
2143{
2144 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2145 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2146 int i;
2147
	/* let the last to be cleared first */
2149 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2150 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2151 unsigned int mask = BIT((unsigned int)i);
2152
2153 if (hdev->hw_tc_map & mask &&
2154 !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the no pfc TC private buffer */
2156 priv->wl.low = 0;
2157 priv->wl.high = 0;
2158 priv->buf_size = 0;
2159 priv->enable = 0;
2160 no_pfc_priv_num--;
2161 }
2162
2163 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2164 no_pfc_priv_num == 0)
2165 break;
2166 }
2167
2168 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2169}
2170
2171static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2172 struct hclge_pkt_buf_alloc *buf_alloc)
2173{
2174 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2175 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2176 int i;
2177
	/* let the last to be cleared first */
2179 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2180 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2181 unsigned int mask = BIT((unsigned int)i);
2182
2183 if (hdev->hw_tc_map & mask &&
2184 hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of pfc TC with private buffer */
2186 priv->wl.low = 0;
2187 priv->enable = 0;
2188 priv->wl.high = 0;
2189 priv->buf_size = 0;
2190 pfc_priv_num--;
2191 }
2192
2193 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2194 pfc_priv_num == 0)
2195 break;
2196 }
2197
2198 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2199}
2200
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2203{
2204#define COMPENSATE_BUFFER 0x3C00
2205#define COMPENSATE_HALF_MPS_NUM 5
2206#define PRIV_WL_GAP 0x1800
2207
2208 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2209 u32 tc_num = hclge_get_tc_num(hdev);
2210 u32 half_mps = hdev->mps >> 1;
2211 u32 min_rx_priv;
2212 unsigned int i;
2213
2214 if (tc_num)
2215 rx_priv = rx_priv / tc_num;
2216
2217 if (tc_num <= NEED_RESERVE_TC_NUM)
2218 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2219
2220 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2221 COMPENSATE_HALF_MPS_NUM * half_mps;
2222 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2223 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2224 if (rx_priv < min_rx_priv)
2225 return false;
2226
2227 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2228 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2229
2230 priv->enable = 0;
2231 priv->wl.low = 0;
2232 priv->wl.high = 0;
2233 priv->buf_size = 0;
2234
2235 if (!(hdev->hw_tc_map & BIT(i)))
2236 continue;
2237
2238 priv->enable = 1;
2239 priv->buf_size = rx_priv;
2240 priv->wl.high = rx_priv - hdev->dv_buf_size;
2241 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2242 }
2243
2244 buf_alloc->s_buf.buf_size = 0;
2245
2246 return true;
2247}
2248
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculate successful, negative: fail
 */
2254static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2255 struct hclge_pkt_buf_alloc *buf_alloc)
2256{
	/* When DCB is not supported, rx private buffer is not allocated. */
2258 if (!hnae3_dev_dcb_supported(hdev)) {
2259 u32 rx_all = hdev->pkt_buf_size;
2260
2261 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2262 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2263 return -ENOMEM;
2264
2265 return 0;
2266 }
2267
2268 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2269 return 0;
2270
2271 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2272 return 0;
2273
	/* try to decrease the buffer size */
2275 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2276 return 0;
2277
2278 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2279 return 0;
2280
2281 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2282 return 0;
2283
2284 return -ENOMEM;
2285}
2286
2287static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2288 struct hclge_pkt_buf_alloc *buf_alloc)
2289{
2290 struct hclge_rx_priv_buff_cmd *req;
2291 struct hclge_desc desc;
2292 int ret;
2293 int i;
2294
2295 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2296 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2297
	/* Alloc private buffer TCs */
2299 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2300 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2301
2302 req->buf_num[i] =
2303 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2304 req->buf_num[i] |=
2305 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2306 }
2307
2308 req->shared_buf =
2309 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2310 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2311
2312 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2313 if (ret)
2314 dev_err(&hdev->pdev->dev,
2315 "rx private buffer alloc cmd failed %d\n", ret);
2316
2317 return ret;
2318}
2319
2320static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2321 struct hclge_pkt_buf_alloc *buf_alloc)
2322{
2323 struct hclge_rx_priv_wl_buf *req;
2324 struct hclge_priv_buf *priv;
2325 struct hclge_desc desc[2];
2326 int i, j;
2327 int ret;
2328
2329 for (i = 0; i < 2; i++) {
2330 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2331 false);
2332 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2333
		/* The first descriptor set the NEXT bit to 1 */
2335 if (i == 0)
2336 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2337 else
2338 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2339
2340 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2341 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2342
2343 priv = &buf_alloc->priv_buf[idx];
2344 req->tc_wl[j].high =
2345 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2346 req->tc_wl[j].high |=
2347 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2348 req->tc_wl[j].low =
2349 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2350 req->tc_wl[j].low |=
2351 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2352 }
2353 }
2354
	/* Send 2 descriptors at one time */
2356 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2357 if (ret)
2358 dev_err(&hdev->pdev->dev,
2359 "rx private waterline config cmd failed %d\n",
2360 ret);
2361 return ret;
2362}
2363
2364static int hclge_common_thrd_config(struct hclge_dev *hdev,
2365 struct hclge_pkt_buf_alloc *buf_alloc)
2366{
2367 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2368 struct hclge_rx_com_thrd *req;
2369 struct hclge_desc desc[2];
2370 struct hclge_tc_thrd *tc;
2371 int i, j;
2372 int ret;
2373
2374 for (i = 0; i < 2; i++) {
2375 hclge_cmd_setup_basic_desc(&desc[i],
2376 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2377 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2378
		/* The first descriptor set the NEXT bit to 1 */
2380 if (i == 0)
2381 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2382 else
2383 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2384
2385 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2386 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2387
2388 req->com_thrd[j].high =
2389 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2390 req->com_thrd[j].high |=
2391 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2392 req->com_thrd[j].low =
2393 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2394 req->com_thrd[j].low |=
2395 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2396 }
2397 }
2398
	/* Send 2 descriptors at one time */
2400 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2401 if (ret)
2402 dev_err(&hdev->pdev->dev,
2403 "common threshold config cmd failed %d\n", ret);
2404 return ret;
2405}
2406
2407static int hclge_common_wl_config(struct hclge_dev *hdev,
2408 struct hclge_pkt_buf_alloc *buf_alloc)
2409{
2410 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2411 struct hclge_rx_com_wl *req;
2412 struct hclge_desc desc;
2413 int ret;
2414
2415 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2416
2417 req = (struct hclge_rx_com_wl *)desc.data;
2418 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2419 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2420
2421 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2422 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2423
2424 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2425 if (ret)
2426 dev_err(&hdev->pdev->dev,
2427 "common waterline config cmd failed %d\n", ret);
2428
2429 return ret;
2430}
2431
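/* calculate and configure the tx/rx packet buffer, waterlines and
 * thresholds for all TCs
 */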
2432int hclge_buffer_alloc(struct hclge_dev *hdev)
2433{
2434 struct hclge_pkt_buf_alloc *pkt_buf;
2435 int ret;
2436
2437 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2438 if (!pkt_buf)
2439 return -ENOMEM;
2440
2441 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2442 if (ret) {
2443 dev_err(&hdev->pdev->dev,
2444 "could not calc tx buffer size for all TCs %d\n", ret);
2445 goto out;
2446 }
2447
2448 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2449 if (ret) {
2450 dev_err(&hdev->pdev->dev,
2451 "could not alloc tx buffers %d\n", ret);
2452 goto out;
2453 }
2454
2455 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2456 if (ret) {
2457 dev_err(&hdev->pdev->dev,
2458 "could not calc rx priv buffer size for all TCs %d\n",
2459 ret);
2460 goto out;
2461 }
2462
2463 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2464 if (ret) {
2465 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2466 ret);
2467 goto out;
2468 }
2469
2470 if (hnae3_dev_dcb_supported(hdev)) {
2471 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2472 if (ret) {
2473 dev_err(&hdev->pdev->dev,
2474 "could not configure rx private waterline %d\n",
2475 ret);
2476 goto out;
2477 }
2478
2479 ret = hclge_common_thrd_config(hdev, pkt_buf);
2480 if (ret) {
2481 dev_err(&hdev->pdev->dev,
2482 "could not configure common threshold %d\n",
2483 ret);
2484 goto out;
2485 }
2486 }
2487
2488 ret = hclge_common_wl_config(hdev, pkt_buf);
2489 if (ret)
2490 dev_err(&hdev->pdev->dev,
2491 "could not configure common waterline %d\n", ret);
2492
2493out:
2494 kfree(pkt_buf);
2495 return ret;
2496}
2497
2498static int hclge_init_roce_base_info(struct hclge_vport *vport)
2499{
2500 struct hnae3_handle *roce = &vport->roce;
2501 struct hnae3_handle *nic = &vport->nic;
2502 struct hclge_dev *hdev = vport->back;
2503
2504 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2505
2506 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2507 return -EINVAL;
2508
2509 roce->rinfo.base_vector = hdev->num_nic_msi;
2510
2511 roce->rinfo.netdev = nic->kinfo.netdev;
2512 roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2513 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2514
2515 roce->pdev = nic->pdev;
2516 roce->ae_algo = nic->ae_algo;
2517 roce->numa_node_mask = nic->numa_node_mask;
2518
2519 return 0;
2520}
2521
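/* allocate MSI/MSI-X vectors and the arrays tracking per vector usage */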
2522static int hclge_init_msi(struct hclge_dev *hdev)
2523{
2524 struct pci_dev *pdev = hdev->pdev;
2525 int vectors;
2526 int i;
2527
2528 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2529 hdev->num_msi,
2530 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2531 if (vectors < 0) {
2532 dev_err(&pdev->dev,
2533 "failed(%d) to allocate MSI/MSI-X vectors\n",
2534 vectors);
2535 return vectors;
2536 }
2537 if (vectors < hdev->num_msi)
2538 dev_warn(&hdev->pdev->dev,
2539 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2540 hdev->num_msi, vectors);
2541
2542 hdev->num_msi = vectors;
2543 hdev->num_msi_left = vectors;
2544
2545 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2546 sizeof(u16), GFP_KERNEL);
2547 if (!hdev->vector_status) {
2548 pci_free_irq_vectors(pdev);
2549 return -ENOMEM;
2550 }
2551
2552 for (i = 0; i < hdev->num_msi; i++)
2553 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2554
2555 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2556 sizeof(int), GFP_KERNEL);
2557 if (!hdev->vector_irq) {
2558 pci_free_irq_vectors(pdev);
2559 return -ENOMEM;
2560 }
2561
2562 return 0;
2563}
2564
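/* only 10M and 100M support half duplex, force full duplex otherwise */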
2565static u8 hclge_check_speed_dup(u8 duplex, int speed)
2566{
2567 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2568 duplex = HCLGE_MAC_FULL;
2569
2570 return duplex;
2571}
2572
2573static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
2574 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
2575 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
2576 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
2577 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
2578 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
2579 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
2580 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
2581 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
2582 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
2583};
2584
2585static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
2586{
2587 u16 i;
2588
2589 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
2590 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
2591 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
2592 return 0;
2593 }
2594 }
2595
2596 return -EINVAL;
2597}
2598
2599static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2600 u8 duplex)
2601{
2602 struct hclge_config_mac_speed_dup_cmd *req;
2603 struct hclge_desc desc;
2604 u32 speed_fw;
2605 int ret;
2606
2607 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2608
2609 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2610
2611 if (duplex)
2612 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2613
2614 ret = hclge_convert_to_fw_speed(speed, &speed_fw);
2615 if (ret) {
2616 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2617 return ret;
2618 }
2619
2620 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
2621 speed_fw);
2622 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2623 1);
2624
2625 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2626 if (ret) {
2627 dev_err(&hdev->pdev->dev,
2628 "mac speed/duplex config cmd failed %d.\n", ret);
2629 return ret;
2630 }
2631
2632 return 0;
2633}
2634
2635int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2636{
2637 struct hclge_mac *mac = &hdev->hw.mac;
2638 int ret;
2639
2640 duplex = hclge_check_speed_dup(duplex, speed);
2641 if (!mac->support_autoneg && mac->speed == speed &&
2642 mac->duplex == duplex)
2643 return 0;
2644
2645 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2646 if (ret)
2647 return ret;
2648
2649 hdev->hw.mac.speed = speed;
2650 hdev->hw.mac.duplex = duplex;
2651
2652 return 0;
2653}
2654
2655static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2656 u8 duplex)
2657{
2658 struct hclge_vport *vport = hclge_get_vport(handle);
2659 struct hclge_dev *hdev = vport->back;
2660
2661 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2662}
2663
2664static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2665{
2666 struct hclge_config_auto_neg_cmd *req;
2667 struct hclge_desc desc;
2668 u32 flag = 0;
2669 int ret;
2670
2671 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2672
2673 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2674 if (enable)
2675 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2676 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2677
2678 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2679 if (ret)
2680 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2681 ret);
2682
2683 return ret;
2684}
2685
2686static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2687{
2688 struct hclge_vport *vport = hclge_get_vport(handle);
2689 struct hclge_dev *hdev = vport->back;
2690
2691 if (!hdev->hw.mac.support_autoneg) {
2692 if (enable) {
2693 dev_err(&hdev->pdev->dev,
2694 "autoneg is not supported by current port\n");
2695 return -EOPNOTSUPP;
2696 } else {
2697 return 0;
2698 }
2699 }
2700
2701 return hclge_set_autoneg_en(hdev, enable);
2702}
2703
2704static int hclge_get_autoneg(struct hnae3_handle *handle)
2705{
2706 struct hclge_vport *vport = hclge_get_vport(handle);
2707 struct hclge_dev *hdev = vport->back;
2708 struct phy_device *phydev = hdev->hw.mac.phydev;
2709
2710 if (phydev)
2711 return phydev->autoneg;
2712
2713 return hdev->hw.mac.autoneg;
2714}
2715
2716static int hclge_restart_autoneg(struct hnae3_handle *handle)
2717{
2718 struct hclge_vport *vport = hclge_get_vport(handle);
2719 struct hclge_dev *hdev = vport->back;
2720 int ret;
2721
2722 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2723
2724 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2725 if (ret)
2726 return ret;
2727 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2728}
2729
2730static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2731{
2732 struct hclge_vport *vport = hclge_get_vport(handle);
2733 struct hclge_dev *hdev = vport->back;
2734
2735 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2736 return hclge_set_autoneg_en(hdev, !halt);
2737
2738 return 0;
2739}
2740
2741static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2742{
2743 struct hclge_config_fec_cmd *req;
2744 struct hclge_desc desc;
2745 int ret;
2746
2747 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2748
2749 req = (struct hclge_config_fec_cmd *)desc.data;
2750 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2751 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2752 if (fec_mode & BIT(HNAE3_FEC_RS))
2753 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2754 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2755 if (fec_mode & BIT(HNAE3_FEC_BASER))
2756 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2758
2759 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2760 if (ret)
2761 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2762
2763 return ret;
2764}
2765
2766static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2767{
2768 struct hclge_vport *vport = hclge_get_vport(handle);
2769 struct hclge_dev *hdev = vport->back;
2770 struct hclge_mac *mac = &hdev->hw.mac;
2771 int ret;
2772
2773 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2774 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2775 return -EINVAL;
2776 }
2777
2778 ret = hclge_set_fec_hw(hdev, fec_mode);
2779 if (ret)
2780 return ret;
2781
2782 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2783 return 0;
2784}
2785
2786static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2787 u8 *fec_mode)
2788{
2789 struct hclge_vport *vport = hclge_get_vport(handle);
2790 struct hclge_dev *hdev = vport->back;
2791 struct hclge_mac *mac = &hdev->hw.mac;
2792
2793 if (fec_ability)
2794 *fec_ability = mac->fec_ability;
2795 if (fec_mode)
2796 *fec_mode = mac->fec_mode;
2797}
2798
2799static int hclge_mac_init(struct hclge_dev *hdev)
2800{
2801 struct hclge_mac *mac = &hdev->hw.mac;
2802 int ret;
2803
2804 hdev->support_sfp_query = true;
2805 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2806 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2807 hdev->hw.mac.duplex);
2808 if (ret)
2809 return ret;
2810
2811 if (hdev->hw.mac.support_autoneg) {
2812 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2813 if (ret)
2814 return ret;
2815 }
2816
2817 mac->link = 0;
2818
2819 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2820 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2821 if (ret)
2822 return ret;
2823 }
2824
2825 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2826 if (ret) {
2827 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2828 return ret;
2829 }
2830
2831 ret = hclge_set_default_loopback(hdev);
2832 if (ret)
2833 return ret;
2834
2835 ret = hclge_buffer_alloc(hdev);
2836 if (ret)
2837 dev_err(&hdev->pdev->dev,
			"failed to allocate buffer, ret = %d\n", ret);
2839
2840 return ret;
2841}
2842
2843static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2844{
2845 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2846 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
2847 hdev->last_mbx_scheduled = jiffies;
2848 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2849 }
2850}
2851
2852static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2853{
2854 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2855 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2856 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
2857 hdev->last_rst_scheduled = jiffies;
2858 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2859 }
2860}
2861
2862static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2863{
2864 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2865 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2866 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2867}
2868
2869void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2870{
2871 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2872 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2873 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2874}
2875
2876static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2877{
2878 struct hclge_link_status_cmd *req;
2879 struct hclge_desc desc;
2880 int ret;
2881
2882 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2883 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2884 if (ret) {
2885 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2886 ret);
2887 return ret;
2888 }
2889
2890 req = (struct hclge_link_status_cmd *)desc.data;
2891 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2892 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2893
2894 return 0;
2895}
2896
2897static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2898{
2899 struct phy_device *phydev = hdev->hw.mac.phydev;
2900
2901 *link_status = HCLGE_LINK_STATUS_DOWN;
2902
2903 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2904 return 0;
2905
2906 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2907 return 0;
2908
2909 return hclge_get_mac_link_status(hdev, link_status);
2910}
2911
2912static void hclge_push_link_status(struct hclge_dev *hdev)
2913{
2914 struct hclge_vport *vport;
2915 int ret;
2916 u16 i;
2917
2918 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2919 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2920
2921 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2922 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2923 continue;
2924
2925 ret = hclge_push_vf_link_status(vport);
2926 if (ret) {
2927 dev_err(&hdev->pdev->dev,
2928 "failed to push link status to vf%u, ret = %d\n",
2929 i, ret);
2930 }
2931 }
2932}
2933
2934static void hclge_update_link_status(struct hclge_dev *hdev)
2935{
2936 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2937 struct hnae3_handle *handle = &hdev->vport[0].nic;
2938 struct hnae3_client *rclient = hdev->roce_client;
2939 struct hnae3_client *client = hdev->nic_client;
2940 int state;
2941 int ret;
2942
2943 if (!client)
2944 return;
2945
2946 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2947 return;
2948
2949 ret = hclge_get_mac_phy_link(hdev, &state);
2950 if (ret) {
2951 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2952 return;
2953 }
2954
2955 if (state != hdev->hw.mac.link) {
2956 hdev->hw.mac.link = state;
2957 client->ops->link_status_change(handle, state);
2958 hclge_config_mac_tnl_int(hdev, state);
2959 if (rclient && rclient->ops->link_status_change)
2960 rclient->ops->link_status_change(rhandle, state);
2961
2962 hclge_push_link_status(hdev);
2963 }
2964
2965 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2966}
2967
2968static void hclge_update_speed_advertising(struct hclge_mac *mac)
2969{
2970 u32 speed_ability;
2971
2972 if (hclge_get_speed_bit(mac->speed, &speed_ability))
2973 return;
2974
2975 switch (mac->module_type) {
2976 case HNAE3_MODULE_TYPE_FIBRE_LR:
2977 hclge_convert_setting_lr(speed_ability, mac->advertising);
2978 break;
2979 case HNAE3_MODULE_TYPE_FIBRE_SR:
2980 case HNAE3_MODULE_TYPE_AOC:
2981 hclge_convert_setting_sr(speed_ability, mac->advertising);
2982 break;
2983 case HNAE3_MODULE_TYPE_CR:
2984 hclge_convert_setting_cr(speed_ability, mac->advertising);
2985 break;
2986 case HNAE3_MODULE_TYPE_KR:
2987 hclge_convert_setting_kr(speed_ability, mac->advertising);
2988 break;
2989 default:
2990 break;
2991 }
2992}
2993
2994static void hclge_update_fec_advertising(struct hclge_mac *mac)
2995{
2996 if (mac->fec_mode & BIT(HNAE3_FEC_RS))
2997 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2998 mac->advertising);
2999 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3000 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3001 mac->advertising);
3002 else
3003 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3004 mac->advertising);
3005}
3006
3007static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3008{
3009 struct hclge_mac *mac = &hdev->hw.mac;
3010 bool rx_en, tx_en;
3011
3012 switch (hdev->fc_mode_last_time) {
3013 case HCLGE_FC_RX_PAUSE:
3014 rx_en = true;
3015 tx_en = false;
3016 break;
3017 case HCLGE_FC_TX_PAUSE:
3018 rx_en = false;
3019 tx_en = true;
3020 break;
3021 case HCLGE_FC_FULL:
3022 rx_en = true;
3023 tx_en = true;
3024 break;
3025 default:
3026 rx_en = false;
3027 tx_en = false;
3028 break;
3029 }
3030
3031 linkmode_set_pause(mac->advertising, tx_en, rx_en);
3032}
3033
3034static void hclge_update_advertising(struct hclge_dev *hdev)
3035{
3036 struct hclge_mac *mac = &hdev->hw.mac;
3037
3038 linkmode_zero(mac->advertising);
3039 hclge_update_speed_advertising(mac);
3040 hclge_update_fec_advertising(mac);
3041 hclge_update_pause_advertising(hdev);
3042}
3043
3044static void hclge_update_port_capability(struct hclge_dev *hdev,
3045 struct hclge_mac *mac)
3046{
3047 if (hnae3_dev_fec_supported(hdev))
3049 hclge_convert_setting_fec(mac);
3050
	/* firmware can not identify back plane type, the media type
	 * read from configuration can help to deal with it
	 */
3054 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3055 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3056 mac->module_type = HNAE3_MODULE_TYPE_KR;
3057 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3058 mac->module_type = HNAE3_MODULE_TYPE_TP;
3059
3060 if (mac->support_autoneg) {
3061 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3062 linkmode_copy(mac->advertising, mac->supported);
3063 } else {
3064 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3065 mac->supported);
3066 hclge_update_advertising(hdev);
3067 }
3068}
3069
3070static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3071{
3072 struct hclge_sfp_info_cmd *resp;
3073 struct hclge_desc desc;
3074 int ret;
3075
3076 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3077 resp = (struct hclge_sfp_info_cmd *)desc.data;
3078 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3079 if (ret == -EOPNOTSUPP) {
3080 dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
3082 return ret;
3083 } else if (ret) {
3084 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3085 return ret;
3086 }
3087
3088 *speed = le32_to_cpu(resp->speed);
3089
3090 return 0;
3091}
3092
3093static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3094{
3095 struct hclge_sfp_info_cmd *resp;
3096 struct hclge_desc desc;
3097 int ret;
3098
3099 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3100 resp = (struct hclge_sfp_info_cmd *)desc.data;
3101
3102 resp->query_type = QUERY_ACTIVE_SPEED;
3103
3104 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3105 if (ret == -EOPNOTSUPP) {
3106 dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
3108 return ret;
3109 } else if (ret) {
3110 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3111 return ret;
3112 }
3113
	/* In some cases the speed queried from the IMP may be 0, it should
	 * not be written to mac->speed
	 */
3117 if (!le32_to_cpu(resp->speed))
3118 return 0;
3119
3120 mac->speed = le32_to_cpu(resp->speed);
3121
	/* if resp->speed_ability is 0, it means it's an old version
	 * firmware that does not report these fields, so do not update them
	 */
3124 if (resp->speed_ability) {
3125 mac->module_type = le32_to_cpu(resp->module_type);
3126 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3127 mac->autoneg = resp->autoneg;
3128 mac->support_autoneg = resp->autoneg_ability;
3129 mac->speed_type = QUERY_ACTIVE_SPEED;
3130 if (!resp->active_fec)
3131 mac->fec_mode = 0;
3132 else
3133 mac->fec_mode = BIT(resp->active_fec);
3134 } else {
3135 mac->speed_type = QUERY_SFP_SPEED;
3136 }
3137
3138 return 0;
3139}
3140
3141static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3142 struct ethtool_link_ksettings *cmd)
3143{
3144 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3145 struct hclge_vport *vport = hclge_get_vport(handle);
3146 struct hclge_phy_link_ksetting_0_cmd *req0;
3147 struct hclge_phy_link_ksetting_1_cmd *req1;
3148 u32 supported, advertising, lp_advertising;
3149 struct hclge_dev *hdev = vport->back;
3150 int ret;
3151
3152 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3153 true);
3154 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3155 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3156 true);
3157
3158 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3159 if (ret) {
3160 dev_err(&hdev->pdev->dev,
3161 "failed to get phy link ksetting, ret = %d.\n", ret);
3162 return ret;
3163 }
3164
3165 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3166 cmd->base.autoneg = req0->autoneg;
3167 cmd->base.speed = le32_to_cpu(req0->speed);
3168 cmd->base.duplex = req0->duplex;
3169 cmd->base.port = req0->port;
3170 cmd->base.transceiver = req0->transceiver;
3171 cmd->base.phy_address = req0->phy_address;
3172 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3173 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3174 supported = le32_to_cpu(req0->supported);
3175 advertising = le32_to_cpu(req0->advertising);
3176 lp_advertising = le32_to_cpu(req0->lp_advertising);
3177 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3178 supported);
3179 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3180 advertising);
3181 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3182 lp_advertising);
3183
3184 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3185 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3186 cmd->base.master_slave_state = req1->master_slave_state;
3187
3188 return 0;
3189}
3190
3191static int
3192hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3193 const struct ethtool_link_ksettings *cmd)
3194{
3195 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3196 struct hclge_vport *vport = hclge_get_vport(handle);
3197 struct hclge_phy_link_ksetting_0_cmd *req0;
3198 struct hclge_phy_link_ksetting_1_cmd *req1;
3199 struct hclge_dev *hdev = vport->back;
3200 u32 advertising;
3201 int ret;
3202
3203 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3204 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3205 (cmd->base.duplex != DUPLEX_HALF &&
3206 cmd->base.duplex != DUPLEX_FULL)))
3207 return -EINVAL;
3208
3209 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3210 false);
3211 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3212 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3213 false);
3214
3215 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3216 req0->autoneg = cmd->base.autoneg;
3217 req0->speed = cpu_to_le32(cmd->base.speed);
3218 req0->duplex = cmd->base.duplex;
3219 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3220 cmd->link_modes.advertising);
3221 req0->advertising = cpu_to_le32(advertising);
3222 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3223
3224 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3225 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3226
3227 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3228 if (ret) {
3229 dev_err(&hdev->pdev->dev,
3230 "failed to set phy link ksettings, ret = %d.\n", ret);
3231 return ret;
3232 }
3233
3234 hdev->hw.mac.autoneg = cmd->base.autoneg;
3235 hdev->hw.mac.speed = cmd->base.speed;
3236 hdev->hw.mac.duplex = cmd->base.duplex;
3237 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3238
3239 return 0;
3240}
3241
3242static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3243{
3244 struct ethtool_link_ksettings cmd;
3245 int ret;
3246
3247 if (!hnae3_dev_phy_imp_supported(hdev))
3248 return 0;
3249
3250 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3251 if (ret)
3252 return ret;
3253
3254 hdev->hw.mac.autoneg = cmd.base.autoneg;
3255 hdev->hw.mac.speed = cmd.base.speed;
3256 hdev->hw.mac.duplex = cmd.base.duplex;
3257
3258 return 0;
3259}
3260
3261static int hclge_tp_port_init(struct hclge_dev *hdev)
3262{
3263 struct ethtool_link_ksettings cmd;
3264
3265 if (!hnae3_dev_phy_imp_supported(hdev))
3266 return 0;
3267
3268 cmd.base.autoneg = hdev->hw.mac.autoneg;
3269 cmd.base.speed = hdev->hw.mac.speed;
3270 cmd.base.duplex = hdev->hw.mac.duplex;
3271 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3272
3273 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3274}
3275
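/* refresh port info from the PHY for copper ports, or from the SFP
 * module otherwise
 */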
3276static int hclge_update_port_info(struct hclge_dev *hdev)
3277{
3278 struct hclge_mac *mac = &hdev->hw.mac;
3279 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3280 int ret;
3281
	/* get the port info from SFP cmd if not copper port */
3283 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3284 return hclge_update_tp_port_info(hdev);
3285
	/* if IMP does not support get SFP/qSFP info, return directly */
3287 if (!hdev->support_sfp_query)
3288 return 0;
3289
3290 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3291 ret = hclge_get_sfp_info(hdev, mac);
3292 else
3293 ret = hclge_get_sfp_speed(hdev, &speed);
3294
3295 if (ret == -EOPNOTSUPP) {
3296 hdev->support_sfp_query = false;
3297 return ret;
3298 } else if (ret) {
3299 return ret;
3300 }
3301
3302 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3303 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3304 hclge_update_port_capability(hdev, mac);
3305 return 0;
3306 }
3307 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3308 HCLGE_MAC_FULL);
3309 } else {
3310 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3311 return 0;
3312
		/* must config full duplex for SFP */
3314 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3315 }
3316}
3317
3318static int hclge_get_status(struct hnae3_handle *handle)
3319{
3320 struct hclge_vport *vport = hclge_get_vport(handle);
3321 struct hclge_dev *hdev = vport->back;
3322
3323 hclge_update_link_status(hdev);
3324
3325 return hdev->hw.mac.link;
3326}
3327
3328static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3329{
3330 if (!pci_num_vf(hdev->pdev)) {
3331 dev_err(&hdev->pdev->dev,
3332 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3333 return NULL;
3334 }
3335
3336 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3337 dev_err(&hdev->pdev->dev,
3338 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3339 vf, pci_num_vf(hdev->pdev));
3340 return NULL;
3341 }
3342
	/* VF vport indexes start from 1; vport 0 is the PF itself */
3344 vf += HCLGE_VF_VPORT_START_NUM;
3345 return &hdev->vport[vf];
3346}
3347
3348static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3349 struct ifla_vf_info *ivf)
3350{
3351 struct hclge_vport *vport = hclge_get_vport(handle);
3352 struct hclge_dev *hdev = vport->back;
3353
3354 vport = hclge_get_vf_vport(hdev, vf);
3355 if (!vport)
3356 return -EINVAL;
3357
3358 ivf->vf = vf;
3359 ivf->linkstate = vport->vf_info.link_state;
3360 ivf->spoofchk = vport->vf_info.spoofchk;
3361 ivf->trusted = vport->vf_info.trusted;
3362 ivf->min_tx_rate = 0;
3363 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3364 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3365 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3366 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3367 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3368
3369 return 0;
3370}
3371
3372static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3373 int link_state)
3374{
3375 struct hclge_vport *vport = hclge_get_vport(handle);
3376 struct hclge_dev *hdev = vport->back;
3377 int link_state_old;
3378 int ret;
3379
3380 vport = hclge_get_vf_vport(hdev, vf);
3381 if (!vport)
3382 return -EINVAL;
3383
3384 link_state_old = vport->vf_info.link_state;
3385 vport->vf_info.link_state = link_state;
3386
3387 ret = hclge_push_vf_link_status(vport);
3388 if (ret) {
3389 vport->vf_info.link_state = link_state_old;
3390 dev_err(&hdev->pdev->dev,
3391 "failed to push vf%d link status, ret = %d\n", vf, ret);
3392 }
3393
3394 return ret;
3395}
3396
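/* identify which event source triggered the misc (vector 0) interrupt */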
3397static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3398{
3399 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3400
	/* fetch the events from their corresponding regs */
3402 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3403 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3404 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3405 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3406
	/* Assumption: if reset and mailbox events are reported together,
	 * the higher priority reset event is handled first and the mailbox
	 * event is left pending, to be serviced after the reset completes.
	 * Events below are therefore checked in priority order: IMP reset,
	 * global reset, hardware error, PTP and finally mailbox.
	 */

	/* check for vector0 reset event sources */
3415 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3416 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3417 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3418 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3419 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3420 hdev->rst_stats.imp_rst_cnt++;
3421 return HCLGE_VECTOR0_EVENT_RST;
3422 }
3423
3424 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3425 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3426 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3427 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3428 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3429 hdev->rst_stats.global_rst_cnt++;
3430 return HCLGE_VECTOR0_EVENT_RST;
3431 }
3432
	/* check for vector0 msix event and hardware error event source */
3434 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3435 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3436 return HCLGE_VECTOR0_EVENT_ERR;
3437
	/* check for vector0 ptp event source */
3439 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3440 *clearval = msix_src_reg;
3441 return HCLGE_VECTOR0_EVENT_PTP;
3442 }
3443
	/* check for vector0 mailbox(=CMDQ RX) event source */
3445 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3446 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3447 *clearval = cmdq_src_reg;
3448 return HCLGE_VECTOR0_EVENT_MBX;
3449 }
3450
	/* print other vector0 event source */
3452 dev_info(&hdev->pdev->dev,
3453 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3454 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3455
3456 return HCLGE_VECTOR0_EVENT_OTHER;
3457}
3458
3459static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3460 u32 regclr)
3461{
3462 switch (event_type) {
3463 case HCLGE_VECTOR0_EVENT_PTP:
3464 case HCLGE_VECTOR0_EVENT_RST:
3465 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3466 break;
3467 case HCLGE_VECTOR0_EVENT_MBX:
3468 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3469 break;
3470 default:
3471 break;
3472 }
3473}
3474
3475static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3476{
3477 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3478 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3479 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3480 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3481 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3482}
3483
3484static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3485{
3486 writel(enable ? 1 : 0, vector->addr);
3487}
3488
3489static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3490{
3491 struct hclge_dev *hdev = data;
3492 unsigned long flags;
3493 u32 clearval = 0;
3494 u32 event_cause;
3495
3496 hclge_enable_vector(&hdev->misc_vector, false);
3497 event_cause = hclge_check_event_cause(hdev, &clearval);
3498
	/* vector 0 interrupt is shared with reset and mailbox source events */
3500 switch (event_cause) {
3501 case HCLGE_VECTOR0_EVENT_ERR:
3502 hclge_errhand_task_schedule(hdev);
3503 break;
3504 case HCLGE_VECTOR0_EVENT_RST:
3505 hclge_reset_task_schedule(hdev);
3506 break;
3507 case HCLGE_VECTOR0_EVENT_PTP:
3508 spin_lock_irqsave(&hdev->ptp->lock, flags);
3509 hclge_ptp_clean_tx_hwts(hdev);
3510 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3511 break;
3512 case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule mbx task as there are more
		 * mbx messages reported by this interrupt.
		 */
3522 hclge_mbx_task_schedule(hdev);
3523 break;
3524 default:
3525 dev_warn(&hdev->pdev->dev,
3526 "received unknown or unhandled event of vector0\n");
3527 break;
3528 }
3529
3530 hclge_clear_event_cause(hdev, event_cause, clearval);
3531
	/* Enable interrupt if it is not caused by reset event or error event */
3533 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3534 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3535 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3536 hclge_enable_vector(&hdev->misc_vector, true);
3537
3538 return IRQ_HANDLED;
3539}
3540
3541static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3542{
3543 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3544 dev_warn(&hdev->pdev->dev,
3545 "vector(vector_id %d) has been freed.\n", vector_id);
3546 return;
3547 }
3548
3549 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3550 hdev->num_msi_left += 1;
3551 hdev->num_msi_used -= 1;
3552}
3553
3554static void hclge_get_misc_vector(struct hclge_dev *hdev)
3555{
3556 struct hclge_misc_vector *vector = &hdev->misc_vector;
3557
3558 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3559
3560 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3561 hdev->vector_status[0] = 0;
3562
3563 hdev->num_msi_left -= 1;
3564 hdev->num_msi_used += 1;
3565}
3566
3567static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3568{
3569 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3570 &hdev->affinity_mask);
3571}
3572
3573static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3574{
3575 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3576}
3577
3578static int hclge_misc_irq_init(struct hclge_dev *hdev)
3579{
3580 int ret;
3581
3582 hclge_get_misc_vector(hdev);
3583
	/* this would be explicitly freed in the end */
3585 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3586 HCLGE_NAME, pci_name(hdev->pdev));
3587 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3588 0, hdev->misc_vector.name, hdev);
3589 if (ret) {
3590 hclge_free_vector(hdev, 0);
3591 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3592 hdev->misc_vector.vector_irq);
3593 }
3594
3595 return ret;
3596}
3597
3598static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3599{
3600 free_irq(hdev->misc_vector.vector_irq, hdev);
3601 hclge_free_vector(hdev, 0);
3602}
3603
3604int hclge_notify_client(struct hclge_dev *hdev,
3605 enum hnae3_reset_notify_type type)
3606{
3607 struct hnae3_handle *handle = &hdev->vport[0].nic;
3608 struct hnae3_client *client = hdev->nic_client;
3609 int ret;
3610
3611 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3612 return 0;
3613
3614 if (!client->ops->reset_notify)
3615 return -EOPNOTSUPP;
3616
3617 ret = client->ops->reset_notify(handle, type);
3618 if (ret)
3619 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3620 type, ret);
3621
3622 return ret;
3623}
3624
3625static int hclge_notify_roce_client(struct hclge_dev *hdev,
3626 enum hnae3_reset_notify_type type)
3627{
3628 struct hnae3_handle *handle = &hdev->vport[0].roce;
3629 struct hnae3_client *client = hdev->roce_client;
3630 int ret;
3631
3632 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3633 return 0;
3634
3635 if (!client->ops->reset_notify)
3636 return -EOPNOTSUPP;
3637
3638 ret = client->ops->reset_notify(handle, type);
3639 if (ret)
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
3641 type, ret);
3642
3643 return ret;
3644}
3645
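/* poll the reset status register until the pending reset bit gets
 * cleared or the wait times out
 */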
3646static int hclge_reset_wait(struct hclge_dev *hdev)
3647{
#define HCLGE_RESET_WAIT_MS 100
3649#define HCLGE_RESET_WAIT_CNT 350
3650
3651 u32 val, reg, reg_bit;
3652 u32 cnt = 0;
3653
3654 switch (hdev->reset_type) {
3655 case HNAE3_IMP_RESET:
3656 reg = HCLGE_GLOBAL_RESET_REG;
3657 reg_bit = HCLGE_IMP_RESET_BIT;
3658 break;
3659 case HNAE3_GLOBAL_RESET:
3660 reg = HCLGE_GLOBAL_RESET_REG;
3661 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3662 break;
3663 case HNAE3_FUNC_RESET:
3664 reg = HCLGE_FUN_RST_ING;
3665 reg_bit = HCLGE_FUN_RST_ING_B;
3666 break;
3667 default:
3668 dev_err(&hdev->pdev->dev,
3669 "Wait for unsupported reset type: %d\n",
3670 hdev->reset_type);
3671 return -EINVAL;
3672 }
3673
3674 val = hclge_read_dev(&hdev->hw, reg);
3675 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3677 val = hclge_read_dev(&hdev->hw, reg);
3678 cnt++;
3679 }
3680
3681 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3682 dev_warn(&hdev->pdev->dev,
			 "wait for reset timed out: %d\n", hdev->reset_type);
3684 return -EBUSY;
3685 }
3686
3687 return 0;
3688}
3689
3690static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3691{
3692 struct hclge_vf_rst_cmd *req;
3693 struct hclge_desc desc;
3694
3695 req = (struct hclge_vf_rst_cmd *)desc.data;
3696 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3697 req->dest_vfid = func_id;
3698
3699 if (reset)
3700 req->vf_rst = 0x1;
3701
3702 return hclge_cmd_send(&hdev->hw, &desc, 1);
3703}
3704
3705static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3706{
3707 int i;
3708
3709 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3710 struct hclge_vport *vport = &hdev->vport[i];
3711 int ret;
3712
3713
3714 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3715 if (ret) {
3716 dev_err(&hdev->pdev->dev,
3717 "set vf(%u) rst failed %d!\n",
3718 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3719 ret);
3720 return ret;
3721 }
3722
3723 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3724 continue;
3725
3726
3727
3728
3729
3730 ret = hclge_inform_reset_assert_to_vf(vport);
3731 if (ret)
3732 dev_warn(&hdev->pdev->dev,
3733 "inform reset to vf(%u) failed %d!\n",
3734 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3735 ret);
3736 }
3737
3738 return 0;
3739}
3740
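/* The mailbox task only runs when it has actually been scheduled
 * (HCLGE_STATE_MBX_SERVICE_SCHED), the command queue is usable, and no
 * other context is already processing mailbox messages; the
 * HCLGE_STATE_MBX_HANDLING bit acts as the mutual-exclusion flag for the
 * last condition.
 */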
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
	    test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_mbx_scheduled +
				   HCLGE_MBX_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "mbx service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
			 smp_processor_id());

	hclge_mbx_handler(hdev);

	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

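/* Before asserting a PF reset, poll the firmware until every VF reports
 * ready. The loop budget is HCLGE_PF_RESET_SYNC_CNT (1500) iterations of
 * HCLGE_PF_RESET_SYNC_TIME (20 ms) each, roughly 30 seconds, after which
 * the sync is abandoned with a warning.
 */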
static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_sync_cmd *req;
	struct hclge_desc desc;
	int cnt = 0;
	int ret;

	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);

	do {
		/* keep handling mailbox events so VFs can be brought down
		 * over mbx during a PF or FLR reset
		 */
		hclge_mailbox_service_task(hdev);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* wait here for compatibility with old firmware, which may
		 * not support the HCLGE_OPC_QUERY_VF_RST_RDY command
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return;
		} else if (ret) {
			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
				 ret);
			return;
		} else if (req->all_vf_ready) {
			return;
		}
		msleep(HCLGE_PF_RESET_SYNC_TIME);
		hclge_comm_cmd_reuse_desc(&desc, true);
	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);

	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
}

void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type)
{
	struct hnae3_client *client = hdev->nic_client;

	if (!client || !client->ops->process_hw_error ||
	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return;

	client->ops->process_hw_error(&hdev->vport[0].nic, type);
}

static void hclge_handle_imp_error(struct hclge_dev *hdev)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}

	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
	}
}

int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
	struct hclge_desc desc;
	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
	req->fun_reset_vfid = func_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}

static void hclge_do_reset(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;

	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finished\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		dev_info(&pdev->dev, "IMP reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
		break;
	case HNAE3_GLOBAL_RESET:
		dev_info(&pdev->dev, "global reset requested\n");
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

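/* Reset levels are strictly ordered: IMP covers GLOBAL, which covers FUNC,
 * which covers FLR. hclge_get_reset_level() therefore returns the highest
 * pending level and clears every lower-level request that the chosen reset
 * will implicitly satisfy.
 */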
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source can only be
	 * cleared after the hardware reset is done
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}

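/* The reset handshake is signalled through the HCLGE_COMM_NIC_SW_RST_RDY
 * bit of the CSQ depth register: setting it indicates the driver has
 * finished its preparatory work and the hardware reset may proceed;
 * clearing it withdraws that indication once re-initialization is done.
 */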
static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}

static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_all_vf_rst(hdev, true);
	if (ret)
		return ret;

	hclge_func_reset_sync_vf(hdev);

	return 0;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing a pf reset, it is not necessary to do any
		 * mailbox handling or send any command to firmware, because
		 * both are only valid after hclge_comm_cmd_init is called.
		 */
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;
		break;
	case HNAE3_IMP_RESET:
		hclge_handle_imp_error(hdev);
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static void hclge_show_rst_info(struct hclge_dev *hdev)
{
	char *buf;

	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
	if (!buf)
		return;

	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);

	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);

	kfree(buf);
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because of a new reset interrupt\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status when reset fails */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	hclge_show_rst_info(hdev);

	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	return false;
}

static void hclge_update_reset_level(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_level;

	/* reset request will not be set during reset, so clear the
	 * pending reset request to avoid an unnecessary reset
	 * caused by the same reason.
	 */
	hclge_get_reset_level(ae_dev, &hdev->reset_request);

	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix.
	 */
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
}

static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear up the handshake status after re-initialization is done */
	hclge_reset_handshake(hdev, false);

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}

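/* A reset is driven in three phases: hclge_reset_prepare() quiesces the
 * clients and asserts the reset, hclge_reset_wait() polls until the
 * hardware reports completion, and hclge_reset_rebuild() re-initializes
 * the stack and brings the clients back up. hclge_reset() chains the
 * three and funnels any failure into hclge_reset_err_handle().
 */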
static int hclge_reset_prepare(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.reset_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclge_reset_prepare_wait(hdev);
}

static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_reset_stack(hdev);
	rtnl_unlock();
	if (ret)
		return ret;

	hclge_clear_reset_cause(hdev);

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error only after the reset has already
	 * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times, so that recovery
	 * can still make progress
	 */
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	hclge_update_reset_level(hdev);

	return 0;
}

static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* This handler may be called for two reasons: a recoverable hardware
	 * error that can only be fixed by reset, or a new reset request from
	 * the stack (e.g. a TX timeout).
	 *
	 * If we are still within HCLGE_RESET_INTERVAL of the last reset, do
	 * not start a new reset immediately; re-arm the reset timer and let
	 * it re-trigger this handler later instead.
	 */
	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	}

	if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, this reset request has
	 * already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status and
	 *       re-schedule now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling is complete */
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error(hdev,
					       &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	}

	hclge_handle_hw_ras_error(ae_dev);

	hclge_handle_err_reset_request(hdev);
}

static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}

static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_rst_scheduled +
				   HCLGE_RESET_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "reset service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
			 smp_processor_id());

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
	int i;

	/* start from vport 1, for the PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);

		/* If the vf is not alive, set mps to the default value */
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
	}
}

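/* The periodic task runs its lightweight synchronizations (link status,
 * MAC table, promisc mode, FD table) on every invocation, but throttles
 * the heavyweight work (statistics, port info, VLAN and ARFS housekeeping)
 * to at most once per second: if less than HZ jiffies have passed since
 * the last full pass, it only re-schedules itself with the remaining
 * delay.
 */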
static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);

	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		return;

	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
	hclge_update_link_status(hdev);
	hclge_sync_mac_table(hdev);
	hclge_sync_promisc_mode(hdev);
	hclge_sync_fd_table(hdev);

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	hclge_update_vport_alive(hdev);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
		hclge_update_stats_for_all(hdev);

	hclge_update_port_info(hdev);
	hclge_sync_vlan_filter(hdev);

	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
		hclge_rfs_filter_expire(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclge_task_schedule(hdev, delta);
}

static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent concurrence with the irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}

static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_ptp_service_task(hdev);
	hclge_mailbox_service_task(hdev);
	hclge_periodic_service_task(hdev);

	/* Handle error recovery, reset and mbx again in case the periodic
	 * task delays the handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_mailbox_service_task(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

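/* The first 64 interrupt vectors are addressed from HCLGE_VECTOR_REG_BASE
 * at a fixed stride; vectors beyond HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 live in
 * an extended register region, so their I/O address is derived from both
 * the quotient and the remainder of (idx - 1) by that limit.
 */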
static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
				  struct hnae3_vector_info *vector_info)
{
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64

	vector_info->vector = pci_irq_vector(hdev->pdev, idx);

	/* need an extended offset to config vectors >= 64 */
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.hw.io_base +
			HCLGE_VECTOR_REG_BASE +
			(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.hw.io_base +
			HCLGE_VECTOR_EXT_REG_BASE +
			(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
			HCLGE_VECTOR_REG_OFFSET_H +
			(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
			HCLGE_VECTOR_REG_OFFSET;

	hdev->vector_status[idx] = hdev->vport[0].vport_id;
	hdev->vector_irq[idx] = vector_info->vector;
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	u16 i = 0;
	u16 j;

	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		while (++i < hdev->num_nic_msi) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				hclge_get_vector_info(hdev, i, vector);
				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector = %d\n", vector);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;

	hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
		return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->pf_rss_size_max;
}

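/* For each enabled TC the hardware takes the queue region size as a log2
 * value. For example, a TC with 24 queues is rounded up to 32, so
 * tc_size = ilog2(32) = 5; rss_size must be nonzero and no larger than
 * the RSS indirection table.
 */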
static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	struct hnae3_tc_info *tc_info;
	u16 roundup_size;
	u16 rss_size;
	int i;

	tc_info = &vport->nic.kinfo.tc_info;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		/* tc_size set to hardware is the log2 of the roundup power of
		 * two of rss_size; the actual queue size is limited by the
		 * indirection table.
		 */
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}

		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = tc_info->tqp_offset[i];
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
	u8 *key = hdev->rss_cfg.rss_hash_key;
	u8 hfunc = hdev->rss_cfg.rss_algo;
	int ret;

	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					     rss_indir);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
					     &hdev->hw.hw, true,
					     &hdev->rss_cfg);
	if (ret)
		return ret;

	return hclge_init_rss_tc_mode(hdev);
}

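/* One command descriptor carries at most HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring-to-vector mappings, so long ring chains are flushed to firmware in
 * batches: whenever the descriptor fills up it is sent and re-initialized,
 * and any remainder is sent after the loop.
 */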
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_comm_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc, op, false);
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector_id=%d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}

static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

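/* Flow director rules are kept in fd_rule_list sorted by location. Each
 * node moves through a small state machine: TO_ADD (queued for hardware
 * configuration), ACTIVE (present in hardware), TO_DEL (queued for
 * removal) and DELETED (removed from both hardware and the list). The
 * helpers below keep fd_bmap and hclge_fd_rule_num consistent with the
 * list as rules change state.
 */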
static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}

static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
		/* For TO_ADD and ACTIVE the rule keeps the same location, so
		 * copy the new rule content over the old node in place:
		 * splice the new rule into the old node's list position,
		 * overwrite the old node and free the temporary new node.
		 * No list node is added or removed, so the rule count stays
		 * unchanged.
		 */
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
		/* If the old rule is TO_ADD, it has never been written to
		 * hardware, so it can be dropped from the list directly;
		 * otherwise mark it TO_DEL so the periodic task removes it
		 * from hardware before the node is freed.
		 */
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}

static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;

		/* record the parent node, whose location is smaller than
		 * the location of the new rule
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert fd rule node in ascending order according to rule's location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}

static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret = %d\n", ret);
	return ret;
}

static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
					  struct hclge_fd_rule *rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;
	struct hclge_fd_user_def_info *info, *old_info;
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return 0;

	/* valid layers start from 1, so subtract 1 to index the cfg array */
	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	info = &rule->ep.user_def;

	if (!cfg->ref_cnt || cfg->offset == info->offset)
		return 0;

	if (cfg->ref_cnt > 1)
		goto error;

	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
	if (fd_rule) {
		old_info = &fd_rule->ep.user_def;
		if (info->layer == old_info->layer)
			return 0;
	}

error:
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
		info->layer + 1);
	return -ENOSPC;
}

static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt) {
		cfg->offset = rule->ep.user_def.offset;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
	cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt)
		return;

	cfg->ref_cnt--;
	if (!cfg->ref_cnt) {
		cfg->offset = 0;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
}

static void hclge_update_fd_list(struct hclge_dev *hdev,
				 enum HCLGE_FD_NODE_STATE state, u16 location,
				 struct hclge_fd_rule *new_rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;

	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
		return;
	}

	/* it's unlikely to fail here, because the rule's existence has
	 * been checked before.
	 */
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
			 location);
		return;
	}

	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;

	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);

	hclge_fd_set_user_def_cmd(hdev, cfg);
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS		0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_dev_fd_supported(hdev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If the max 400-bit key is used, MAC tuples can be supported too */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
			      action->override_tc);
		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
	}
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	int offset, moffset, ip_offset;
	enum HCLGE_FD_KEY_OPT key_opt;
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	u8 *p = (u8 *)rule;
	int i;

	if (rule->unused_tuple & BIT(tuple_bit))
		return true;

	key_opt = tuple_key_info[tuple_bit].key_opt;
	offset = tuple_key_info[tuple_bit].offset;
	moffset = tuple_key_info[tuple_bit].moffset;

	switch (key_opt) {
	case KEY_OPT_U8:
		calc_x(*key_x, p[offset], p[moffset]);
		calc_y(*key_y, p[offset], p[moffset]);

		return true;
	case KEY_OPT_LE16:
		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case KEY_OPT_LE32:
		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case KEY_OPT_MAC:
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
		}

		return true;
	case KEY_OPT_IP:
		ip_offset = IPV4_INDEX * sizeof(u32);
		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits are filled with zero.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;

		tuple_size = tuple_key_info[i].key_length / 8;
		if (!(key_cfg->tuple_active & BIT(i)))
			continue;

		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			   MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_fd_ad_data ad_data;

	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
		ad_data.override_tc = true;
		ad_data.queue_id =
			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
		ad_data.tc_size =
			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
	} else {
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
		ad_data.use_counter = true;
		ad_data.counter_id = rule->vf_id %
				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
	} else {
		ad_data.use_counter = false;
		ad_data.counter_id = 0;
	}

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

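/* The hclge_fd_check_*_tuple() helpers validate an ethtool flow spec and
 * build up unused_tuple: a set bit means the corresponding inner tuple is
 * not used by the rule (e.g. a zero address or port), so it is later
 * masked out of the TCAM key instead of being matched against.
 */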
5711static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5712 u32 *unused_tuple)
5713{
5714 if (!spec || !unused_tuple)
5715 return -EINVAL;
5716
5717 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5718
5719 if (!spec->ip4src)
5720 *unused_tuple |= BIT(INNER_SRC_IP);
5721
5722 if (!spec->ip4dst)
5723 *unused_tuple |= BIT(INNER_DST_IP);
5724
5725 if (!spec->psrc)
5726 *unused_tuple |= BIT(INNER_SRC_PORT);
5727
5728 if (!spec->pdst)
5729 *unused_tuple |= BIT(INNER_DST_PORT);
5730
5731 if (!spec->tos)
5732 *unused_tuple |= BIT(INNER_IP_TOS);
5733
5734 return 0;
5735}
5736
5737static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5738 u32 *unused_tuple)
5739{
5740 if (!spec || !unused_tuple)
5741 return -EINVAL;
5742
5743 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5744 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5745
5746 if (!spec->ip4src)
5747 *unused_tuple |= BIT(INNER_SRC_IP);
5748
5749 if (!spec->ip4dst)
5750 *unused_tuple |= BIT(INNER_DST_IP);
5751
5752 if (!spec->tos)
5753 *unused_tuple |= BIT(INNER_IP_TOS);
5754
5755 if (!spec->proto)
5756 *unused_tuple |= BIT(INNER_IP_PROTO);
5757
5758 if (spec->l4_4_bytes)
5759 return -EOPNOTSUPP;
5760
5761 if (spec->ip_ver != ETH_RX_NFC_IP4)
5762 return -EOPNOTSUPP;
5763
5764 return 0;
5765}
5766
5767static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5768 u32 *unused_tuple)
5769{
5770 if (!spec || !unused_tuple)
5771 return -EINVAL;
5772
5773 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5774
	/* check whether src/dst ip address used */
5776 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5777 *unused_tuple |= BIT(INNER_SRC_IP);
5778
5779 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5780 *unused_tuple |= BIT(INNER_DST_IP);
5781
5782 if (!spec->psrc)
5783 *unused_tuple |= BIT(INNER_SRC_PORT);
5784
5785 if (!spec->pdst)
5786 *unused_tuple |= BIT(INNER_DST_PORT);
5787
5788 if (!spec->tclass)
5789 *unused_tuple |= BIT(INNER_IP_TOS);
5790
5791 return 0;
5792}
5793
5794static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5795 u32 *unused_tuple)
5796{
5797 if (!spec || !unused_tuple)
5798 return -EINVAL;
5799
5800 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5801 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5802
	/* check whether src/dst ip address used */
5804 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5805 *unused_tuple |= BIT(INNER_SRC_IP);
5806
5807 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5808 *unused_tuple |= BIT(INNER_DST_IP);
5809
5810 if (!spec->l4_proto)
5811 *unused_tuple |= BIT(INNER_IP_PROTO);
5812
5813 if (!spec->tclass)
5814 *unused_tuple |= BIT(INNER_IP_TOS);
5815
5816 if (spec->l4_4_bytes)
5817 return -EOPNOTSUPP;
5818
5819 return 0;
5820}
5821
5822static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5823{
5824 if (!spec || !unused_tuple)
5825 return -EINVAL;
5826
5827 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5828 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5829 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5830
5831 if (is_zero_ether_addr(spec->h_source))
5832 *unused_tuple |= BIT(INNER_SRC_MAC);
5833
5834 if (is_zero_ether_addr(spec->h_dest))
5835 *unused_tuple |= BIT(INNER_DST_MAC);
5836
5837 if (!spec->h_proto)
5838 *unused_tuple |= BIT(INNER_ETH_TYPE);
5839
5840 return 0;
5841}
5842
5843static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5844 struct ethtool_rx_flow_spec *fs,
5845 u32 *unused_tuple)
5846{
5847 if (fs->flow_type & FLOW_EXT) {
5848 if (fs->h_ext.vlan_etype) {
5849 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5850 return -EOPNOTSUPP;
5851 }
5852
5853 if (!fs->h_ext.vlan_tci)
5854 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5855
5856 if (fs->m_ext.vlan_tci &&
5857 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5858 dev_err(&hdev->pdev->dev,
5859 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
				be16_to_cpu(fs->h_ext.vlan_tci),
				VLAN_N_VID - 1);
5861 return -EINVAL;
5862 }
5863 } else {
5864 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5865 }
5866
5867 if (fs->flow_type & FLOW_MAC_EXT) {
5868 if (hdev->fd_cfg.fd_mode !=
5869 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5870 dev_err(&hdev->pdev->dev,
5871 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5872 return -EOPNOTSUPP;
5873 }
5874
5875 if (is_zero_ether_addr(fs->h_ext.h_dest))
5876 *unused_tuple |= BIT(INNER_DST_MAC);
5877 else
5878 *unused_tuple &= ~BIT(INNER_DST_MAC);
5879 }
5880
5881 return 0;
5882}
5883
5884static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
5885 struct hclge_fd_user_def_info *info)
5886{
5887 switch (flow_type) {
5888 case ETHER_FLOW:
5889 info->layer = HCLGE_FD_USER_DEF_L2;
5890 *unused_tuple &= ~BIT(INNER_L2_RSV);
5891 break;
5892 case IP_USER_FLOW:
5893 case IPV6_USER_FLOW:
5894 info->layer = HCLGE_FD_USER_DEF_L3;
5895 *unused_tuple &= ~BIT(INNER_L3_RSV);
5896 break;
5897 case TCP_V4_FLOW:
5898 case UDP_V4_FLOW:
5899 case TCP_V6_FLOW:
5900 case UDP_V6_FLOW:
5901 info->layer = HCLGE_FD_USER_DEF_L4;
5902 *unused_tuple &= ~BIT(INNER_L4_RSV);
5903 break;
5904 default:
5905 return -EOPNOTSUPP;
5906 }
5907
5908 return 0;
5909}
5910
5911static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
5912{
5913 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
5914}
5915
5916static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
5917 struct ethtool_rx_flow_spec *fs,
5918 u32 *unused_tuple,
5919 struct hclge_fd_user_def_info *info)
5920{
5921 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
5922 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5923 u16 data, offset, data_mask, offset_mask;
5924 int ret;
5925
5926 info->layer = HCLGE_FD_USER_DEF_NONE;
5927 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5928
5929 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
5930 return 0;
5931
	/* the user-def field of ethtool is a 64 bit value; the low 16 bits
	 * of data[1] carry the data to match, and the low bits of data[0]
	 * carry the payload offset to match at
	 */
5935 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
5936 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
5937 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
5938 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
5939
5940 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
5941 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5942 return -EOPNOTSUPP;
5943 }
5944
5945 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
5946 dev_err(&hdev->pdev->dev,
5947 "user-def offset[%u] should be no more than %u\n",
5948 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
5949 return -EINVAL;
5950 }
5951
5952 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
5953 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
5954 return -EINVAL;
5955 }
5956
5957 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
5958 if (ret) {
5959 dev_err(&hdev->pdev->dev,
5960 "unsupported flow type for user-def bytes, ret = %d\n",
5961 ret);
5962 return ret;
5963 }
5964
5965 info->data = data;
5966 info->data_mask = data_mask;
5967 info->offset = offset;
5968
5969 return 0;
5970}
5971
5972static int hclge_fd_check_spec(struct hclge_dev *hdev,
5973 struct ethtool_rx_flow_spec *fs,
5974 u32 *unused_tuple,
5975 struct hclge_fd_user_def_info *info)
5976{
5977 u32 flow_type;
5978 int ret;
5979
5980 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5981 dev_err(&hdev->pdev->dev,
5982 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5983 fs->location,
5984 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5985 return -EINVAL;
5986 }
5987
5988 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
5989 if (ret)
5990 return ret;
5991
5992 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5993 switch (flow_type) {
5994 case SCTP_V4_FLOW:
5995 case TCP_V4_FLOW:
5996 case UDP_V4_FLOW:
5997 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5998 unused_tuple);
5999 break;
6000 case IP_USER_FLOW:
6001 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6002 unused_tuple);
6003 break;
6004 case SCTP_V6_FLOW:
6005 case TCP_V6_FLOW:
6006 case UDP_V6_FLOW:
6007 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6008 unused_tuple);
6009 break;
6010 case IPV6_USER_FLOW:
6011 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6012 unused_tuple);
6013 break;
6014 case ETHER_FLOW:
6015 if (hdev->fd_cfg.fd_mode !=
6016 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6017 dev_err(&hdev->pdev->dev,
6018 "ETHER_FLOW is not supported in current fd mode!\n");
6019 return -EOPNOTSUPP;
6020 }
6021
6022 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6023 unused_tuple);
6024 break;
6025 default:
6026 dev_err(&hdev->pdev->dev,
6027 "unsupported protocol type, protocol type = %#x\n",
6028 flow_type);
6029 return -EOPNOTSUPP;
6030 }
6031
6032 if (ret) {
6033 dev_err(&hdev->pdev->dev,
6034 "failed to check flow union tuple, ret = %d\n",
6035 ret);
6036 return ret;
6037 }
6038
6039 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6040}
6041
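/* The hclge_fd_get_xxx_tuple() helpers below convert an ethtool flow spec
 * into the driver's tuple/mask representation, in CPU byte order.
 */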
6042static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6043 struct ethtool_rx_flow_spec *fs,
6044 struct hclge_fd_rule *rule, u8 ip_proto)
6045{
6046 rule->tuples.src_ip[IPV4_INDEX] =
6047 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6048 rule->tuples_mask.src_ip[IPV4_INDEX] =
6049 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6050
6051 rule->tuples.dst_ip[IPV4_INDEX] =
6052 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6053 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6054 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6055
6056 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6057 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6058
6059 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6060 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6061
6062 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6063 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6064
6065 rule->tuples.ether_proto = ETH_P_IP;
6066 rule->tuples_mask.ether_proto = 0xFFFF;
6067
6068 rule->tuples.ip_proto = ip_proto;
6069 rule->tuples_mask.ip_proto = 0xFF;
6070}
6071
6072static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6073 struct ethtool_rx_flow_spec *fs,
6074 struct hclge_fd_rule *rule)
6075{
6076 rule->tuples.src_ip[IPV4_INDEX] =
6077 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6078 rule->tuples_mask.src_ip[IPV4_INDEX] =
6079 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6080
6081 rule->tuples.dst_ip[IPV4_INDEX] =
6082 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6083 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6084 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6085
6086 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6087 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6088
6089 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6090 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6091
6092 rule->tuples.ether_proto = ETH_P_IP;
6093 rule->tuples_mask.ether_proto = 0xFFFF;
6094}
6095
6096static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6097 struct ethtool_rx_flow_spec *fs,
6098 struct hclge_fd_rule *rule, u8 ip_proto)
6099{
6100 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6101 IPV6_SIZE);
6102 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6103 IPV6_SIZE);
6104
6105 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6106 IPV6_SIZE);
6107 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6108 IPV6_SIZE);
6109
6110 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6111 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6112
6113 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6114 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6115
6116 rule->tuples.ether_proto = ETH_P_IPV6;
6117 rule->tuples_mask.ether_proto = 0xFFFF;
6118
6119 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6120 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6121
6122 rule->tuples.ip_proto = ip_proto;
6123 rule->tuples_mask.ip_proto = 0xFF;
6124}
6125
6126static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6127 struct ethtool_rx_flow_spec *fs,
6128 struct hclge_fd_rule *rule)
6129{
6130 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6131 IPV6_SIZE);
6132 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6133 IPV6_SIZE);
6134
6135 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6136 IPV6_SIZE);
6137 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6138 IPV6_SIZE);
6139
6140 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6141 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6142
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6145
6146 rule->tuples.ether_proto = ETH_P_IPV6;
6147 rule->tuples_mask.ether_proto = 0xFFFF;
6148}
6149
6150static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6151 struct ethtool_rx_flow_spec *fs,
6152 struct hclge_fd_rule *rule)
6153{
6154 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6155 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6156
6157 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6158 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6159
6160 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6161 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6162}
6163
6164static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6165 struct hclge_fd_rule *rule)
6166{
6167 switch (info->layer) {
6168 case HCLGE_FD_USER_DEF_L2:
6169 rule->tuples.l2_user_def = info->data;
6170 rule->tuples_mask.l2_user_def = info->data_mask;
6171 break;
6172 case HCLGE_FD_USER_DEF_L3:
6173 rule->tuples.l3_user_def = info->data;
6174 rule->tuples_mask.l3_user_def = info->data_mask;
6175 break;
6176 case HCLGE_FD_USER_DEF_L4:
6177 rule->tuples.l4_user_def = (u32)info->data << 16;
6178 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6179 break;
6180 default:
6181 break;
6182 }
6183
6184 rule->ep.user_def = *info;
6185}
6186
6187static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6188 struct ethtool_rx_flow_spec *fs,
6189 struct hclge_fd_rule *rule,
6190 struct hclge_fd_user_def_info *info)
6191{
6192 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6193
6194 switch (flow_type) {
6195 case SCTP_V4_FLOW:
6196 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6197 break;
6198 case TCP_V4_FLOW:
6199 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6200 break;
6201 case UDP_V4_FLOW:
6202 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6203 break;
6204 case IP_USER_FLOW:
6205 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6206 break;
6207 case SCTP_V6_FLOW:
6208 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6209 break;
6210 case TCP_V6_FLOW:
6211 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6212 break;
6213 case UDP_V6_FLOW:
6214 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6215 break;
6216 case IPV6_USER_FLOW:
6217 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6218 break;
6219 case ETHER_FLOW:
6220 hclge_fd_get_ether_tuple(hdev, fs, rule);
6221 break;
6222 default:
6223 return -EOPNOTSUPP;
6224 }
6225
6226 if (fs->flow_type & FLOW_EXT) {
6227 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6228 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6229 hclge_fd_get_user_def_tuple(info, rule);
6230 }
6231
6232 if (fs->flow_type & FLOW_MAC_EXT) {
6233 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6234 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6235 }
6236
6237 return 0;
6238}
6239
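/* Program a rule into hardware: write the action (AD) table entry first,
 * then the TCAM key at the same location.
 */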
6240static int hclge_fd_config_rule(struct hclge_dev *hdev,
6241 struct hclge_fd_rule *rule)
6242{
6243 int ret;
6244
6245 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6246 if (ret)
6247 return ret;
6248
6249 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6250}
6251
6252static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6253 struct hclge_fd_rule *rule)
6254{
6255 int ret;
6256
6257 spin_lock_bh(&hdev->fd_rule_lock);
6258
6259 if (hdev->fd_active_type != rule->rule_type &&
6260 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6261 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6262 dev_err(&hdev->pdev->dev,
6263 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6264 rule->rule_type, hdev->fd_active_type);
6265 spin_unlock_bh(&hdev->fd_rule_lock);
6266 return -EINVAL;
6267 }
6268
6269 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6270 if (ret)
6271 goto out;
6272
6273 ret = hclge_clear_arfs_rules(hdev);
6274 if (ret)
6275 goto out;
6276
6277 ret = hclge_fd_config_rule(hdev, rule);
6278 if (ret)
6279 goto out;
6280
6281 rule->state = HCLGE_FD_ACTIVE;
6282 hdev->fd_active_type = rule->rule_type;
6283 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6284
6285out:
6286 spin_unlock_bh(&hdev->fd_rule_lock);
6287 return ret;
6288}
6289
6290static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6291{
6292 struct hclge_vport *vport = hclge_get_vport(handle);
6293 struct hclge_dev *hdev = vport->back;
6294
6295 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6296}
6297
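/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie carries a queue index and an optional vf id, where
 * vf id 0 means the PF itself.
 */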
6298static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6299 u16 *vport_id, u8 *action, u16 *queue_id)
6300{
6301 struct hclge_vport *vport = hdev->vport;
6302
6303 if (ring_cookie == RX_CLS_FLOW_DISC) {
6304 *action = HCLGE_FD_ACTION_DROP_PACKET;
6305 } else {
6306 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6307 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6308 u16 tqps;
6309
		/* vf id from ethtool is increased by 1 internally, so minus 1
		 * when printing it back to keep consistent with the user's
		 * configuration
		 */
6313 if (vf > hdev->num_req_vfs) {
6314 dev_err(&hdev->pdev->dev,
6315 "Error: vf id (%u) should be less than %u\n",
6316 vf - 1U, hdev->num_req_vfs);
6317 return -EINVAL;
6318 }
6319
6320 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6321 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6322
6323 if (ring >= tqps) {
6324 dev_err(&hdev->pdev->dev,
6325 "Error: queue id (%u) > max tqp num (%u)\n",
6326 ring, tqps - 1U);
6327 return -EINVAL;
6328 }
6329
6330 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6331 *queue_id = ring;
6332 }
6333
6334 return 0;
6335}
6336
6337static int hclge_add_fd_entry(struct hnae3_handle *handle,
6338 struct ethtool_rxnfc *cmd)
6339{
6340 struct hclge_vport *vport = hclge_get_vport(handle);
6341 struct hclge_dev *hdev = vport->back;
6342 struct hclge_fd_user_def_info info;
6343 u16 dst_vport_id = 0, q_index = 0;
6344 struct ethtool_rx_flow_spec *fs;
6345 struct hclge_fd_rule *rule;
6346 u32 unused = 0;
6347 u8 action;
6348 int ret;
6349
6350 if (!hnae3_dev_fd_supported(hdev)) {
6351 dev_err(&hdev->pdev->dev,
6352 "flow table director is not supported\n");
6353 return -EOPNOTSUPP;
6354 }
6355
6356 if (!hdev->fd_en) {
6357 dev_err(&hdev->pdev->dev,
6358 "please enable flow director first\n");
6359 return -EOPNOTSUPP;
6360 }
6361
6362 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6363
6364 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6365 if (ret)
6366 return ret;
6367
6368 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6369 &action, &q_index);
6370 if (ret)
6371 return ret;
6372
6373 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6374 if (!rule)
6375 return -ENOMEM;
6376
6377 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6378 if (ret) {
6379 kfree(rule);
6380 return ret;
6381 }
6382
6383 rule->flow_type = fs->flow_type;
6384 rule->location = fs->location;
6385 rule->unused_tuple = unused;
6386 rule->vf_id = dst_vport_id;
6387 rule->queue_id = q_index;
6388 rule->action = action;
6389 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6390
6391 ret = hclge_add_fd_entry_common(hdev, rule);
6392 if (ret)
6393 kfree(rule);
6394
6395 return ret;
6396}
6397
6398static int hclge_del_fd_entry(struct hnae3_handle *handle,
6399 struct ethtool_rxnfc *cmd)
6400{
6401 struct hclge_vport *vport = hclge_get_vport(handle);
6402 struct hclge_dev *hdev = vport->back;
6403 struct ethtool_rx_flow_spec *fs;
6404 int ret;
6405
6406 if (!hnae3_dev_fd_supported(hdev))
6407 return -EOPNOTSUPP;
6408
6409 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6410
6411 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6412 return -EINVAL;
6413
6414 spin_lock_bh(&hdev->fd_rule_lock);
6415 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6416 !test_bit(fs->location, hdev->fd_bmap)) {
6417 dev_err(&hdev->pdev->dev,
6418 "Delete fail, rule %u is inexistent\n", fs->location);
6419 spin_unlock_bh(&hdev->fd_rule_lock);
6420 return -ENOENT;
6421 }
6422
6423 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6424 NULL, false);
6425 if (ret)
6426 goto out;
6427
6428 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6429
6430out:
6431 spin_unlock_bh(&hdev->fd_rule_lock);
6432 return ret;
6433}
6434
6435static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6436 bool clear_list)
6437{
6438 struct hclge_fd_rule *rule;
6439 struct hlist_node *node;
6440 u16 location;
6441
6442 if (!hnae3_dev_fd_supported(hdev))
6443 return;
6444
6445 spin_lock_bh(&hdev->fd_rule_lock);
6446
6447 for_each_set_bit(location, hdev->fd_bmap,
6448 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6449 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6450 NULL, false);
6451
6452 if (clear_list) {
6453 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6454 rule_node) {
6455 hlist_del(&rule->rule_node);
6456 kfree(rule);
6457 }
6458 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6459 hdev->hclge_fd_rule_num = 0;
6460 bitmap_zero(hdev->fd_bmap,
6461 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6462 }
6463
6464 spin_unlock_bh(&hdev->fd_rule_lock);
6465}
6466
6467static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6468{
6469 hclge_clear_fd_rules_in_list(hdev, true);
6470 hclge_fd_disable_user_def(hdev);
6471}
6472
6473static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6474{
6475 struct hclge_vport *vport = hclge_get_vport(handle);
6476 struct hclge_dev *hdev = vport->back;
6477 struct hclge_fd_rule *rule;
6478 struct hlist_node *node;
6479
6480
	/* Return ok here, because reset error handling will check this
	 * return value. If error is returned here, the reset process will
	 * fail.
	 */
6484 if (!hnae3_dev_fd_supported(hdev))
6485 return 0;
6486
	/* if fd is disabled, should not restore it when reset */
6488 if (!hdev->fd_en)
6489 return 0;
6490
6491 spin_lock_bh(&hdev->fd_rule_lock);
6492 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6493 if (rule->state == HCLGE_FD_ACTIVE)
6494 rule->state = HCLGE_FD_TO_ADD;
6495 }
6496 spin_unlock_bh(&hdev->fd_rule_lock);
6497 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6498
6499 return 0;
6500}
6501
6502static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6503 struct ethtool_rxnfc *cmd)
6504{
6505 struct hclge_vport *vport = hclge_get_vport(handle);
6506 struct hclge_dev *hdev = vport->back;
6507
6508 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6509 return -EOPNOTSUPP;
6510
6511 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6512 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6513
6514 return 0;
6515}
6516
6517static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6518 struct ethtool_tcpip4_spec *spec,
6519 struct ethtool_tcpip4_spec *spec_mask)
6520{
6521 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6522 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6523 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6524
6525 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6526 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6527 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6528
6529 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6530 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6531 0 : cpu_to_be16(rule->tuples_mask.src_port);
6532
6533 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6534 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6535 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6536
6537 spec->tos = rule->tuples.ip_tos;
6538 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6539 0 : rule->tuples_mask.ip_tos;
6540}
6541
6542static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6543 struct ethtool_usrip4_spec *spec,
6544 struct ethtool_usrip4_spec *spec_mask)
6545{
6546 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6547 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6548 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6549
6550 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6551 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6552 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6553
6554 spec->tos = rule->tuples.ip_tos;
6555 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6556 0 : rule->tuples_mask.ip_tos;
6557
6558 spec->proto = rule->tuples.ip_proto;
6559 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6560 0 : rule->tuples_mask.ip_proto;
6561
6562 spec->ip_ver = ETH_RX_NFC_IP4;
6563}
6564
6565static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6566 struct ethtool_tcpip6_spec *spec,
6567 struct ethtool_tcpip6_spec *spec_mask)
6568{
	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6573 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6574 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6575 else
6576 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6577 IPV6_SIZE);
6578
6579 if (rule->unused_tuple & BIT(INNER_DST_IP))
6580 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6581 else
6582 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6583 IPV6_SIZE);
6584
6585 spec->tclass = rule->tuples.ip_tos;
6586 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6587 0 : rule->tuples_mask.ip_tos;
6588
6589 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6590 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6591 0 : cpu_to_be16(rule->tuples_mask.src_port);
6592
6593 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6594 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6595 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6596}
6597
6598static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6599 struct ethtool_usrip6_spec *spec,
6600 struct ethtool_usrip6_spec *spec_mask)
6601{
6602 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6603 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6604 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6605 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6606 else
6607 cpu_to_be32_array(spec_mask->ip6src,
6608 rule->tuples_mask.src_ip, IPV6_SIZE);
6609
6610 if (rule->unused_tuple & BIT(INNER_DST_IP))
6611 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6612 else
6613 cpu_to_be32_array(spec_mask->ip6dst,
6614 rule->tuples_mask.dst_ip, IPV6_SIZE);
6615
6616 spec->tclass = rule->tuples.ip_tos;
6617 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6618 0 : rule->tuples_mask.ip_tos;
6619
6620 spec->l4_proto = rule->tuples.ip_proto;
6621 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6622 0 : rule->tuples_mask.ip_proto;
6623}
6624
6625static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6626 struct ethhdr *spec,
6627 struct ethhdr *spec_mask)
6628{
6629 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6630 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6631
6632 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6633 eth_zero_addr(spec_mask->h_source);
6634 else
6635 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6636
6637 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6638 eth_zero_addr(spec_mask->h_dest);
6639 else
6640 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6641
6642 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6643 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6644 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6645}
6646
6647static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6648 struct hclge_fd_rule *rule)
6649{
6650 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6651 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6652 fs->h_ext.data[0] = 0;
6653 fs->h_ext.data[1] = 0;
6654 fs->m_ext.data[0] = 0;
6655 fs->m_ext.data[1] = 0;
6656 } else {
6657 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6658 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6659 fs->m_ext.data[0] =
6660 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6661 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6662 }
6663}
6664
6665static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6666 struct hclge_fd_rule *rule)
6667{
6668 if (fs->flow_type & FLOW_EXT) {
6669 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6670 fs->m_ext.vlan_tci =
6671 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6672 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6673
6674 hclge_fd_get_user_def_info(fs, rule);
6675 }
6676
6677 if (fs->flow_type & FLOW_MAC_EXT) {
6678 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6679 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6680 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6681 else
6682 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6683 rule->tuples_mask.dst_mac);
6684 }
6685}
6686
6687static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6688 u16 location)
6689{
6690 struct hclge_fd_rule *rule = NULL;
6691 struct hlist_node *node2;
6692
6693 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6694 if (rule->location == location)
6695 return rule;
6696 else if (rule->location > location)
6697 return NULL;
6698 }
6699
6700 return NULL;
6701}
6702
6703static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6704 struct hclge_fd_rule *rule)
6705{
6706 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6707 fs->ring_cookie = RX_CLS_FLOW_DISC;
6708 } else {
6709 u64 vf_id;
6710
6711 fs->ring_cookie = rule->queue_id;
6712 vf_id = rule->vf_id;
6713 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6714 fs->ring_cookie |= vf_id;
6715 }
6716}
6717
6718static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6719 struct ethtool_rxnfc *cmd)
6720{
6721 struct hclge_vport *vport = hclge_get_vport(handle);
6722 struct hclge_fd_rule *rule = NULL;
6723 struct hclge_dev *hdev = vport->back;
6724 struct ethtool_rx_flow_spec *fs;
6725
6726 if (!hnae3_dev_fd_supported(hdev))
6727 return -EOPNOTSUPP;
6728
6729 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6730
6731 spin_lock_bh(&hdev->fd_rule_lock);
6732
6733 rule = hclge_get_fd_rule(hdev, fs->location);
6734 if (!rule) {
6735 spin_unlock_bh(&hdev->fd_rule_lock);
6736 return -ENOENT;
6737 }
6738
6739 fs->flow_type = rule->flow_type;
6740 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6741 case SCTP_V4_FLOW:
6742 case TCP_V4_FLOW:
6743 case UDP_V4_FLOW:
6744 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6745 &fs->m_u.tcp_ip4_spec);
6746 break;
6747 case IP_USER_FLOW:
6748 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6749 &fs->m_u.usr_ip4_spec);
6750 break;
6751 case SCTP_V6_FLOW:
6752 case TCP_V6_FLOW:
6753 case UDP_V6_FLOW:
6754 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6755 &fs->m_u.tcp_ip6_spec);
6756 break;
6757 case IPV6_USER_FLOW:
6758 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6759 &fs->m_u.usr_ip6_spec);
6760 break;
	/* The flow type of the rule has been checked before it was added to
	 * the rule list, and all other flow types have been handled above,
	 * so it must be ETHER_FLOW here; use default to handle it.
	 */
6765 default:
6766 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6767 &fs->m_u.ether_spec);
6768 break;
6769 }
6770
6771 hclge_fd_get_ext_info(fs, rule);
6772
6773 hclge_fd_get_ring_cookie(fs, rule);
6774
6775 spin_unlock_bh(&hdev->fd_rule_lock);
6776
6777 return 0;
6778}
6779
6780static int hclge_get_all_rules(struct hnae3_handle *handle,
6781 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6782{
6783 struct hclge_vport *vport = hclge_get_vport(handle);
6784 struct hclge_dev *hdev = vport->back;
6785 struct hclge_fd_rule *rule;
6786 struct hlist_node *node2;
6787 int cnt = 0;
6788
6789 if (!hnae3_dev_fd_supported(hdev))
6790 return -EOPNOTSUPP;
6791
6792 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6793
6794 spin_lock_bh(&hdev->fd_rule_lock);
6795 hlist_for_each_entry_safe(rule, node2,
6796 &hdev->fd_rule_list, rule_node) {
6797 if (cnt == cmd->rule_cnt) {
6798 spin_unlock_bh(&hdev->fd_rule_lock);
6799 return -EMSGSIZE;
6800 }
6801
6802 if (rule->state == HCLGE_FD_TO_DEL)
6803 continue;
6804
6805 rule_locs[cnt] = rule->location;
6806 cnt++;
6807 }
6808
6809 spin_unlock_bh(&hdev->fd_rule_lock);
6810
6811 cmd->rule_cnt = cnt;
6812
6813 return 0;
6814}
6815
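/* Extract the aRFS match tuples from the dissected flow keys, converting
 * addresses and ports to CPU byte order.
 */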
6816static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6817 struct hclge_fd_rule_tuples *tuples)
6818{
6819#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6820#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6821
6822 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6823 tuples->ip_proto = fkeys->basic.ip_proto;
6824 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6825
6826 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6827 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6828 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6829 } else {
6830 int i;
6831
6832 for (i = 0; i < IPV6_SIZE; i++) {
6833 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6834 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6835 }
6836 }
6837}
6838
/* traverse all rules and check whether an existing rule has the same tuples */
6840static struct hclge_fd_rule *
6841hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6842 const struct hclge_fd_rule_tuples *tuples)
6843{
6844 struct hclge_fd_rule *rule = NULL;
6845 struct hlist_node *node;
6846
6847 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6848 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6849 return rule;
6850 }
6851
6852 return NULL;
6853}
6854
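/* Build an aRFS rule from the flow tuples: dst port, src/dst IP and
 * protocol are matched, while MACs, VLAN, TOS and src port are marked
 * unused.
 */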
6855static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6856 struct hclge_fd_rule *rule)
6857{
6858 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6859 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6860 BIT(INNER_SRC_PORT);
6861 rule->action = 0;
6862 rule->vf_id = 0;
6863 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6864 rule->state = HCLGE_FD_TO_ADD;
6865 if (tuples->ether_proto == ETH_P_IP) {
6866 if (tuples->ip_proto == IPPROTO_TCP)
6867 rule->flow_type = TCP_V4_FLOW;
6868 else
6869 rule->flow_type = UDP_V4_FLOW;
6870 } else {
6871 if (tuples->ip_proto == IPPROTO_TCP)
6872 rule->flow_type = TCP_V6_FLOW;
6873 else
6874 rule->flow_type = UDP_V6_FLOW;
6875 }
6876 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6877 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6878}
6879
6880static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6881 u16 flow_id, struct flow_keys *fkeys)
6882{
6883 struct hclge_vport *vport = hclge_get_vport(handle);
6884 struct hclge_fd_rule_tuples new_tuples = {};
6885 struct hclge_dev *hdev = vport->back;
6886 struct hclge_fd_rule *rule;
6887 u16 bit_id;
6888
6889 if (!hnae3_dev_fd_supported(hdev))
6890 return -EOPNOTSUPP;
6891
	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
6895 spin_lock_bh(&hdev->fd_rule_lock);
6896 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6897 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6898 spin_unlock_bh(&hdev->fd_rule_lock);
6899 return -EOPNOTSUPP;
6900 }
6901
6902 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6903
	/* check whether a flow director filter exists for this flow:
	 * if not, create a new one;
	 * if one exists with a different queue id, modify it;
	 * if one exists with the same queue id, do nothing
	 */
6909 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6910 if (!rule) {
6911 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6912 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6913 spin_unlock_bh(&hdev->fd_rule_lock);
6914 return -ENOSPC;
6915 }
6916
6917 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6918 if (!rule) {
6919 spin_unlock_bh(&hdev->fd_rule_lock);
6920 return -ENOMEM;
6921 }
6922
6923 rule->location = bit_id;
6924 rule->arfs.flow_id = flow_id;
6925 rule->queue_id = queue_id;
6926 hclge_fd_build_arfs_rule(&new_tuples, rule);
6927 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6928 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
6929 } else if (rule->queue_id != queue_id) {
6930 rule->queue_id = queue_id;
6931 rule->state = HCLGE_FD_TO_ADD;
6932 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6933 hclge_task_schedule(hdev, 0);
6934 }
6935 spin_unlock_bh(&hdev->fd_rule_lock);
6936 return rule->location;
6937}
6938
6939static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6940{
6941#ifdef CONFIG_RFS_ACCEL
6942 struct hnae3_handle *handle = &hdev->vport[0].nic;
6943 struct hclge_fd_rule *rule;
6944 struct hlist_node *node;
6945
6946 spin_lock_bh(&hdev->fd_rule_lock);
6947 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6948 spin_unlock_bh(&hdev->fd_rule_lock);
6949 return;
6950 }
6951 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6952 if (rule->state != HCLGE_FD_ACTIVE)
6953 continue;
6954 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6955 rule->arfs.flow_id, rule->location)) {
6956 rule->state = HCLGE_FD_TO_DEL;
6957 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6958 }
6959 }
6960 spin_unlock_bh(&hdev->fd_rule_lock);
6961#endif
6962}
6963
/* this function must be called with fd_rule_lock held */
6965static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
6966{
6967#ifdef CONFIG_RFS_ACCEL
6968 struct hclge_fd_rule *rule;
6969 struct hlist_node *node;
6970 int ret;
6971
6972 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
6973 return 0;
6974
6975 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6976 switch (rule->state) {
6977 case HCLGE_FD_TO_DEL:
6978 case HCLGE_FD_ACTIVE:
6979 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6980 rule->location, NULL, false);
6981 if (ret)
6982 return ret;
6983 fallthrough;
6984 case HCLGE_FD_TO_ADD:
6985 hclge_fd_dec_rule_cnt(hdev, rule->location);
6986 hlist_del(&rule->rule_node);
6987 kfree(rule);
6988 break;
6989 default:
6990 break;
6991 }
6992 }
6993 hclge_sync_fd_state(hdev);
6994
6995#endif
6996 return 0;
6997}
6998
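/* The hclge_get_cls_key_xxx() helpers below translate flow_cls_offload
 * dissector matches into rule tuples, marking any absent key as unused.
 */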
6999static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7000 struct hclge_fd_rule *rule)
7001{
7002 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7003 struct flow_match_basic match;
7004 u16 ethtype_key, ethtype_mask;
7005
7006 flow_rule_match_basic(flow, &match);
7007 ethtype_key = ntohs(match.key->n_proto);
7008 ethtype_mask = ntohs(match.mask->n_proto);
7009
7010 if (ethtype_key == ETH_P_ALL) {
7011 ethtype_key = 0;
7012 ethtype_mask = 0;
7013 }
7014 rule->tuples.ether_proto = ethtype_key;
7015 rule->tuples_mask.ether_proto = ethtype_mask;
7016 rule->tuples.ip_proto = match.key->ip_proto;
7017 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7018 } else {
7019 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7020 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7021 }
7022}
7023
7024static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7025 struct hclge_fd_rule *rule)
7026{
7027 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7028 struct flow_match_eth_addrs match;
7029
7030 flow_rule_match_eth_addrs(flow, &match);
7031 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7032 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7033 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7034 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7035 } else {
7036 rule->unused_tuple |= BIT(INNER_DST_MAC);
7037 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7038 }
7039}
7040
7041static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7042 struct hclge_fd_rule *rule)
7043{
7044 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7045 struct flow_match_vlan match;
7046
7047 flow_rule_match_vlan(flow, &match);
7048 rule->tuples.vlan_tag1 = match.key->vlan_id |
7049 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7050 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7051 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7052 } else {
7053 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7054 }
7055}
7056
7057static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7058 struct hclge_fd_rule *rule)
7059{
7060 u16 addr_type = 0;
7061
7062 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7063 struct flow_match_control match;
7064
7065 flow_rule_match_control(flow, &match);
7066 addr_type = match.key->addr_type;
7067 }
7068
7069 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7070 struct flow_match_ipv4_addrs match;
7071
7072 flow_rule_match_ipv4_addrs(flow, &match);
7073 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7074 rule->tuples_mask.src_ip[IPV4_INDEX] =
7075 be32_to_cpu(match.mask->src);
7076 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7077 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7078 be32_to_cpu(match.mask->dst);
7079 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7080 struct flow_match_ipv6_addrs match;
7081
7082 flow_rule_match_ipv6_addrs(flow, &match);
7083 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7084 IPV6_SIZE);
7085 be32_to_cpu_array(rule->tuples_mask.src_ip,
7086 match.mask->src.s6_addr32, IPV6_SIZE);
7087 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7088 IPV6_SIZE);
7089 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7090 match.mask->dst.s6_addr32, IPV6_SIZE);
7091 } else {
7092 rule->unused_tuple |= BIT(INNER_SRC_IP);
7093 rule->unused_tuple |= BIT(INNER_DST_IP);
7094 }
7095}
7096
7097static void hclge_get_cls_key_port(const struct flow_rule *flow,
7098 struct hclge_fd_rule *rule)
7099{
7100 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7101 struct flow_match_ports match;
7102
7103 flow_rule_match_ports(flow, &match);
7104
7105 rule->tuples.src_port = be16_to_cpu(match.key->src);
7106 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7107 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7108 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7109 } else {
7110 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7111 rule->unused_tuple |= BIT(INNER_DST_PORT);
7112 }
7113}
7114
7115static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7116 struct flow_cls_offload *cls_flower,
7117 struct hclge_fd_rule *rule)
7118{
7119 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7120 struct flow_dissector *dissector = flow->match.dissector;
7121
7122 if (dissector->used_keys &
7123 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7124 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7125 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7126 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7127 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7128 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7129 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7130 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7131 dissector->used_keys);
7132 return -EOPNOTSUPP;
7133 }
7134
7135 hclge_get_cls_key_basic(flow, rule);
7136 hclge_get_cls_key_mac(flow, rule);
7137 hclge_get_cls_key_vlan(flow, rule);
7138 hclge_get_cls_key_ip(flow, rule);
7139 hclge_get_cls_key_port(flow, rule);
7140
7141 return 0;
7142}
7143
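/* tc flower rules use 'prio - 1' as the rule location, so the priority
 * must be non-zero, unique and within the stage-1 rule capacity.
 */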
7144static int hclge_check_cls_flower(struct hclge_dev *hdev,
7145 struct flow_cls_offload *cls_flower, int tc)
7146{
7147 u32 prio = cls_flower->common.prio;
7148
7149 if (tc < 0 || tc > hdev->tc_max) {
7150 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7151 return -EINVAL;
7152 }
7153
7154 if (prio == 0 ||
7155 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7156 dev_err(&hdev->pdev->dev,
7157 "prio %u should be in range[1, %u]\n",
7158 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7159 return -EINVAL;
7160 }
7161
7162 if (test_bit(prio - 1, hdev->fd_bmap)) {
7163 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7164 return -EINVAL;
7165 }
7166 return 0;
7167}
7168
7169static int hclge_add_cls_flower(struct hnae3_handle *handle,
7170 struct flow_cls_offload *cls_flower,
7171 int tc)
7172{
7173 struct hclge_vport *vport = hclge_get_vport(handle);
7174 struct hclge_dev *hdev = vport->back;
7175 struct hclge_fd_rule *rule;
7176 int ret;
7177
7178 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7179 if (ret) {
7180 dev_err(&hdev->pdev->dev,
7181 "failed to check cls flower params, ret = %d\n", ret);
7182 return ret;
7183 }
7184
7185 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7186 if (!rule)
7187 return -ENOMEM;
7188
7189 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7190 if (ret) {
7191 kfree(rule);
7192 return ret;
7193 }
7194
7195 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7196 rule->cls_flower.tc = tc;
7197 rule->location = cls_flower->common.prio - 1;
7198 rule->vf_id = 0;
7199 rule->cls_flower.cookie = cls_flower->cookie;
7200 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7201
7202 ret = hclge_add_fd_entry_common(hdev, rule);
7203 if (ret)
7204 kfree(rule);
7205
7206 return ret;
7207}
7208
7209static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7210 unsigned long cookie)
7211{
7212 struct hclge_fd_rule *rule;
7213 struct hlist_node *node;
7214
7215 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7216 if (rule->cls_flower.cookie == cookie)
7217 return rule;
7218 }
7219
7220 return NULL;
7221}
7222
7223static int hclge_del_cls_flower(struct hnae3_handle *handle,
7224 struct flow_cls_offload *cls_flower)
7225{
7226 struct hclge_vport *vport = hclge_get_vport(handle);
7227 struct hclge_dev *hdev = vport->back;
7228 struct hclge_fd_rule *rule;
7229 int ret;
7230
7231 spin_lock_bh(&hdev->fd_rule_lock);
7232
7233 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7234 if (!rule) {
7235 spin_unlock_bh(&hdev->fd_rule_lock);
7236 return -EINVAL;
7237 }
7238
7239 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7240 NULL, false);
7241 if (ret) {
7242 spin_unlock_bh(&hdev->fd_rule_lock);
7243 return ret;
7244 }
7245
7246 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7247 spin_unlock_bh(&hdev->fd_rule_lock);
7248
7249 return 0;
7250}
7251
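/* Flush pending TO_ADD/TO_DEL rules from the software list to hardware.
 * On failure, the FD_TBL_CHANGED flag is set again so the periodic
 * service task will retry.
 */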
7252static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7253{
7254 struct hclge_fd_rule *rule;
7255 struct hlist_node *node;
7256 int ret = 0;
7257
7258 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7259 return;
7260
7261 spin_lock_bh(&hdev->fd_rule_lock);
7262
7263 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7264 switch (rule->state) {
7265 case HCLGE_FD_TO_ADD:
7266 ret = hclge_fd_config_rule(hdev, rule);
7267 if (ret)
7268 goto out;
7269 rule->state = HCLGE_FD_ACTIVE;
7270 break;
7271 case HCLGE_FD_TO_DEL:
7272 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7273 rule->location, NULL, false);
7274 if (ret)
7275 goto out;
7276 hclge_fd_dec_rule_cnt(hdev, rule->location);
7277 hclge_fd_free_node(hdev, rule);
7278 break;
7279 default:
7280 break;
7281 }
7282 }
7283
7284out:
7285 if (ret)
7286 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7287
7288 spin_unlock_bh(&hdev->fd_rule_lock);
7289}
7290
7291static void hclge_sync_fd_table(struct hclge_dev *hdev)
7292{
7293 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7294 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7295
7296 hclge_clear_fd_rules_in_list(hdev, clear_list);
7297 }
7298
7299 hclge_sync_fd_user_def_cfg(hdev, false);
7300
7301 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7302}
7303
7304static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7305{
7306 struct hclge_vport *vport = hclge_get_vport(handle);
7307 struct hclge_dev *hdev = vport->back;
7308
7309 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7310 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7311}
7312
7313static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7314{
7315 struct hclge_vport *vport = hclge_get_vport(handle);
7316 struct hclge_dev *hdev = vport->back;
7317
7318 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7319}
7320
7321static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7322{
7323 struct hclge_vport *vport = hclge_get_vport(handle);
7324 struct hclge_dev *hdev = vport->back;
7325
7326 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7327}
7328
7329static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7330{
7331 struct hclge_vport *vport = hclge_get_vport(handle);
7332 struct hclge_dev *hdev = vport->back;
7333
7334 return hdev->rst_stats.hw_reset_done_cnt;
7335}
7336
7337static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7338{
7339 struct hclge_vport *vport = hclge_get_vport(handle);
7340 struct hclge_dev *hdev = vport->back;
7341
7342 hdev->fd_en = enable;
7343
7344 if (!enable)
7345 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7346 else
7347 hclge_restore_fd_entries(handle);
7348
7349 hclge_task_schedule(hdev, 0);
7350}
7351
7352static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7353{
7354 struct hclge_desc desc;
7355 struct hclge_config_mac_mode_cmd *req =
7356 (struct hclge_config_mac_mode_cmd *)desc.data;
7357 u32 loop_en = 0;
7358 int ret;
7359
7360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7361
7362 if (enable) {
7363 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7364 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7365 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7366 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7367 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7368 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7369 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7370 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7371 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7372 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7373 }
7374
7375 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7376
7377 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7378 if (ret)
7379 dev_err(&hdev->pdev->dev,
7380 "mac enable fail, ret =%d.\n", ret);
7381}
7382
7383static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7384 u8 switch_param, u8 param_mask)
7385{
7386 struct hclge_mac_vlan_switch_cmd *req;
7387 struct hclge_desc desc;
7388 u32 func_id;
7389 int ret;
7390
7391 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7392 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7393
	/* read the current config parameter */
7395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7396 true);
7397 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7398 req->func_id = cpu_to_le32(func_id);
7399
7400 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7401 if (ret) {
7402 dev_err(&hdev->pdev->dev,
7403 "read mac vlan switch parameter fail, ret = %d\n", ret);
7404 return ret;
7405 }
7406
	/* modify and write the new config parameter */
7408 hclge_comm_cmd_reuse_desc(&desc, false);
7409 req->switch_param = (req->switch_param & param_mask) | switch_param;
7410 req->param_mask = param_mask;
7411
7412 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7413 if (ret)
7414 dev_err(&hdev->pdev->dev,
7415 "set mac vlan switch parameter fail, ret = %d\n", ret);
7416 return ret;
7417}
7418
7419static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7420 int link_ret)
7421{
7422#define HCLGE_PHY_LINK_STATUS_NUM 200
7423
7424 struct phy_device *phydev = hdev->hw.mac.phydev;
7425 int i = 0;
7426 int ret;
7427
7428 do {
7429 ret = phy_read_status(phydev);
7430 if (ret) {
7431 dev_err(&hdev->pdev->dev,
7432 "phy update link status fail, ret = %d\n", ret);
7433 return;
7434 }
7435
7436 if (phydev->link == link_ret)
7437 break;
7438
7439 msleep(HCLGE_LINK_STATUS_MS);
7440 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7441}
7442
7443static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7444{
7445#define HCLGE_MAC_LINK_STATUS_NUM 100
7446
7447 int link_status;
7448 int i = 0;
7449 int ret;
7450
7451 do {
7452 ret = hclge_get_mac_link_status(hdev, &link_status);
7453 if (ret)
7454 return ret;
7455 if (link_status == link_ret)
7456 return 0;
7457
7458 msleep(HCLGE_LINK_STATUS_MS);
7459 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7460 return -EBUSY;
7461}
7462
7463static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7464 bool is_phy)
7465{
7466 int link_ret;
7467
7468 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7469
7470 if (is_phy)
7471 hclge_phy_link_status_wait(hdev, link_ret);
7472
7473 return hclge_mac_link_status_wait(hdev, link_ret);
7474}
7475
7476static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7477{
7478 struct hclge_config_mac_mode_cmd *req;
7479 struct hclge_desc desc;
7480 u32 loop_en;
7481 int ret;
7482
7483 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7484
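	/* 1 Read out the MAC mode config at first */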
7485 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7486 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7487 if (ret) {
7488 dev_err(&hdev->pdev->dev,
7489 "mac loopback get fail, ret =%d.\n", ret);
7490 return ret;
7491 }
7492
	/* 2 Then setup the loopback flag */
7494 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7495 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7496
7497 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7498
7499
	/* 3 Config mac work mode with loopback flag
	 * and its original configure parameters
	 */
7502 hclge_comm_cmd_reuse_desc(&desc, false);
7503 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7504 if (ret)
7505 dev_err(&hdev->pdev->dev,
7506 "mac loopback set fail, ret =%d.\n", ret);
7507 return ret;
7508}
7509
7510static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7511 enum hnae3_loop loop_mode)
7512{
7513 struct hclge_common_lb_cmd *req;
7514 struct hclge_desc desc;
7515 u8 loop_mode_b;
7516 int ret;
7517
7518 req = (struct hclge_common_lb_cmd *)desc.data;
7519 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7520
7521 switch (loop_mode) {
7522 case HNAE3_LOOP_SERIAL_SERDES:
7523 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7524 break;
7525 case HNAE3_LOOP_PARALLEL_SERDES:
7526 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7527 break;
7528 case HNAE3_LOOP_PHY:
7529 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7530 break;
7531 default:
7532 dev_err(&hdev->pdev->dev,
7533 "unsupported loopback mode %d\n", loop_mode);
7534 return -ENOTSUPP;
7535 }
7536
7537 req->mask = loop_mode_b;
7538 if (en)
7539 req->enable = loop_mode_b;
7540
7541 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7542 if (ret)
7543 dev_err(&hdev->pdev->dev,
7544 "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7545 loop_mode, ret);
7546
7547 return ret;
7548}
7549
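/* Poll the loopback status until the done bit is set or the retry budget
 * is exhausted, then check the success bit.
 */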
7550static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7551{
7552#define HCLGE_COMMON_LB_RETRY_MS 10
7553#define HCLGE_COMMON_LB_RETRY_NUM 100
7554
7555 struct hclge_common_lb_cmd *req;
7556 struct hclge_desc desc;
7557 u32 i = 0;
7558 int ret;
7559
7560 req = (struct hclge_common_lb_cmd *)desc.data;
7561
7562 do {
7563 msleep(HCLGE_COMMON_LB_RETRY_MS);
7564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7565 true);
7566 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7567 if (ret) {
7568 dev_err(&hdev->pdev->dev,
7569 "failed to get loopback done status, ret = %d\n",
7570 ret);
7571 return ret;
7572 }
7573 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7574 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7575
7576 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7577 dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7578 return -EBUSY;
7579 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7580 dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7581 return -EIO;
7582 }
7583
7584 return 0;
7585}
7586
7587static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7588 enum hnae3_loop loop_mode)
7589{
7590 int ret;
7591
7592 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7593 if (ret)
7594 return ret;
7595
7596 return hclge_cfg_common_loopback_wait(hdev);
7597}
7598
7599static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7600 enum hnae3_loop loop_mode)
7601{
7602 int ret;
7603
7604 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7605 if (ret)
7606 return ret;
7607
7608 hclge_cfg_mac_mode(hdev, en);
7609
7610 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7611 if (ret)
7612 dev_err(&hdev->pdev->dev,
7613 "serdes loopback config mac mode timeout\n");
7614
7615 return ret;
7616}
7617
7618static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7619 struct phy_device *phydev)
7620{
7621 int ret;
7622
7623 if (!phydev->suspended) {
7624 ret = phy_suspend(phydev);
7625 if (ret)
7626 return ret;
7627 }
7628
7629 ret = phy_resume(phydev);
7630 if (ret)
7631 return ret;
7632
7633 return phy_loopback(phydev, true);
7634}
7635
7636static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7637 struct phy_device *phydev)
7638{
7639 int ret;
7640
7641 ret = phy_loopback(phydev, false);
7642 if (ret)
7643 return ret;
7644
7645 return phy_suspend(phydev);
7646}
7647
7648static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7649{
7650 struct phy_device *phydev = hdev->hw.mac.phydev;
7651 int ret;
7652
7653 if (!phydev) {
7654 if (hnae3_dev_phy_imp_supported(hdev))
7655 return hclge_set_common_loopback(hdev, en,
7656 HNAE3_LOOP_PHY);
7657 return -ENOTSUPP;
7658 }
7659
7660 if (en)
7661 ret = hclge_enable_phy_loopback(hdev, phydev);
7662 else
7663 ret = hclge_disable_phy_loopback(hdev, phydev);
7664 if (ret) {
7665 dev_err(&hdev->pdev->dev,
7666 "set phy loopback fail, ret = %d\n", ret);
7667 return ret;
7668 }
7669
7670 hclge_cfg_mac_mode(hdev, en);
7671
7672 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7673 if (ret)
7674 dev_err(&hdev->pdev->dev,
7675 "phy loopback config mac mode timeout\n");
7676
7677 return ret;
7678}
7679
7680static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7681 u16 stream_id, bool enable)
7682{
7683 struct hclge_desc desc;
7684 struct hclge_cfg_com_tqp_queue_cmd *req =
7685 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7686
7687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7688 req->tqp_id = cpu_to_le16(tqp_id);
7689 req->stream_id = cpu_to_le16(stream_id);
7690 if (enable)
7691 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7692
7693 return hclge_cmd_send(&hdev->hw, &desc, 1);
7694}
7695
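/* Enable or disable every TQP owned by this handle; stream id 0 is used
 * for all queues here.
 */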
7696static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7697{
7698 struct hclge_vport *vport = hclge_get_vport(handle);
7699 struct hclge_dev *hdev = vport->back;
7700 int ret;
7701 u16 i;
7702
7703 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7704 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7705 if (ret)
7706 return ret;
7707 }
7708 return 0;
7709}
7710
7711static int hclge_set_loopback(struct hnae3_handle *handle,
7712 enum hnae3_loop loop_mode, bool en)
7713{
7714 struct hclge_vport *vport = hclge_get_vport(handle);
7715 struct hclge_dev *hdev = vport->back;
7716 int ret;
7717
	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
	 * the same, the packets are looped back in the SSU. If SSU loopback
	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
	 */
7723 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7724 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7725
7726 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7727 HCLGE_SWITCH_ALW_LPBK_MASK);
7728 if (ret)
7729 return ret;
7730 }
7731
7732 switch (loop_mode) {
7733 case HNAE3_LOOP_APP:
7734 ret = hclge_set_app_loopback(hdev, en);
7735 break;
7736 case HNAE3_LOOP_SERIAL_SERDES:
7737 case HNAE3_LOOP_PARALLEL_SERDES:
7738 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7739 break;
7740 case HNAE3_LOOP_PHY:
7741 ret = hclge_set_phy_loopback(hdev, en);
7742 break;
7743 default:
7744 ret = -ENOTSUPP;
7745 dev_err(&hdev->pdev->dev,
7746 "loop_mode %d is not supported\n", loop_mode);
7747 break;
7748 }
7749
7750 if (ret)
7751 return ret;
7752
7753 ret = hclge_tqp_enable(handle, en);
7754 if (ret)
7755 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7756 en ? "enable" : "disable", ret);
7757
7758 return ret;
7759}
7760
7761static int hclge_set_default_loopback(struct hclge_dev *hdev)
7762{
7763 int ret;
7764
7765 ret = hclge_set_app_loopback(hdev, false);
7766 if (ret)
7767 return ret;
7768
7769 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7770 if (ret)
7771 return ret;
7772
7773 return hclge_cfg_common_loopback(hdev, false,
7774 HNAE3_LOOP_PARALLEL_SERDES);
7775}
7776
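/* Wait for any in-flight link status update to finish: spin until the
 * LINK_UPDATING flag clears, the service task makes progress (meaning
 * the pending update has completed), or the retry budget runs out.
 */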
7777static void hclge_flush_link_update(struct hclge_dev *hdev)
7778{
7779#define HCLGE_FLUSH_LINK_TIMEOUT 100000
7780
7781 unsigned long last = hdev->serv_processed_cnt;
7782 int i = 0;
7783
7784 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7785 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7786 last == hdev->serv_processed_cnt)
7787 usleep_range(1, 1);
7788}
7789
7790static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7791{
7792 struct hclge_vport *vport = hclge_get_vport(handle);
7793 struct hclge_dev *hdev = vport->back;
7794
7795 if (enable) {
7796 hclge_task_schedule(hdev, 0);
7797 } else {
		/* Set the DOWN flag here to disable link updating */
		set_bit(HCLGE_STATE_DOWN, &hdev->state);

		/* flush memory to make sure DOWN is seen by service task */
		smp_mb__before_atomic();
7803 hclge_flush_link_update(hdev);
7804 }
7805}
7806
7807static int hclge_ae_start(struct hnae3_handle *handle)
7808{
7809 struct hclge_vport *vport = hclge_get_vport(handle);
7810 struct hclge_dev *hdev = vport->back;
7811
	/* mac enable */
7813 hclge_cfg_mac_mode(hdev, true);
7814 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7815 hdev->hw.mac.link = 0;
7816
	/* reset tqp stats */
7818 hclge_comm_reset_tqp_stats(handle);
7819
7820 hclge_mac_start_phy(hdev);
7821
7822 return 0;
7823}
7824
7825static void hclge_ae_stop(struct hnae3_handle *handle)
7826{
7827 struct hclge_vport *vport = hclge_get_vport(handle);
7828 struct hclge_dev *hdev = vport->back;
7829
7830 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7831 spin_lock_bh(&hdev->fd_rule_lock);
7832 hclge_clear_arfs_rules(hdev);
7833 spin_unlock_bh(&hdev->fd_rule_lock);
7834
	/* If it is not PF reset or FLR, the firmware will disable the MAC,
	 * so it only need to stop phy here.
	 */
7838 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7839 hdev->reset_type != HNAE3_FUNC_RESET &&
7840 hdev->reset_type != HNAE3_FLR_RESET) {
7841 hclge_mac_stop_phy(hdev);
7842 hclge_update_link_status(hdev);
7843 return;
7844 }
7845
7846 hclge_reset_tqp(handle);
7847
7848 hclge_config_mac_tnl_int(hdev, false);
7849
	/* Mac disable */
7851 hclge_cfg_mac_mode(hdev, false);
7852
7853 hclge_mac_stop_phy(hdev);
7854
	/* reset tqp stats */
7856 hclge_comm_reset_tqp_stats(handle);
7857 hclge_update_link_status(hdev);
7858}
7859
7860int hclge_vport_start(struct hclge_vport *vport)
7861{
7862 struct hclge_dev *hdev = vport->back;
7863
7864 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7865 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
7866 vport->last_active_jiffies = jiffies;
7867
7868 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7869 if (vport->vport_id) {
7870 hclge_restore_mac_table_common(vport);
7871 hclge_restore_vport_vlan_table(vport);
7872 } else {
7873 hclge_restore_hw_table(hdev);
7874 }
7875 }
7876
7877 clear_bit(vport->vport_id, hdev->vport_config_block);
7878
7879 return 0;
7880}
7881
7882void hclge_vport_stop(struct hclge_vport *vport)
7883{
7884 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7885}
7886
7887static int hclge_client_start(struct hnae3_handle *handle)
7888{
7889 struct hclge_vport *vport = hclge_get_vport(handle);
7890
7891 return hclge_vport_start(vport);
7892}
7893
7894static void hclge_client_stop(struct hnae3_handle *handle)
7895{
7896 struct hclge_vport *vport = hclge_get_vport(handle);
7897
7898 hclge_vport_stop(vport);
7899}
7900
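/* Translate the raw cmdq response of a mac/vlan table command into an
 * errno. The meaning of resp_code depends on the opcode: for ADD, 0/1
 * mean success and the overflow codes map to -ENOSPC; for REMOVE and
 * LOOKUP, 1 means the entry was not found.
 */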
7901static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7902 u16 cmdq_resp, u8 resp_code,
7903 enum hclge_mac_vlan_tbl_opcode op)
7904{
7905 struct hclge_dev *hdev = vport->back;
7906
7907 if (cmdq_resp) {
7908 dev_err(&hdev->pdev->dev,
7909 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7910 cmdq_resp);
7911 return -EIO;
7912 }
7913
7914 if (op == HCLGE_MAC_VLAN_ADD) {
7915 if (!resp_code || resp_code == 1)
7916 return 0;
7917 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7918 resp_code == HCLGE_ADD_MC_OVERFLOW)
7919 return -ENOSPC;
7920
7921 dev_err(&hdev->pdev->dev,
7922 "add mac addr failed for undefined, code=%u.\n",
7923 resp_code);
7924 return -EIO;
7925 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7926 if (!resp_code) {
7927 return 0;
7928 } else if (resp_code == 1) {
7929 dev_dbg(&hdev->pdev->dev,
7930 "remove mac addr failed for miss.\n");
7931 return -ENOENT;
7932 }
7933
7934 dev_err(&hdev->pdev->dev,
7935 "remove mac addr failed for undefined, code=%u.\n",
7936 resp_code);
7937 return -EIO;
7938 } else if (op == HCLGE_MAC_VLAN_LKUP) {
7939 if (!resp_code) {
7940 return 0;
7941 } else if (resp_code == 1) {
7942 dev_dbg(&hdev->pdev->dev,
7943 "lookup mac addr failed for miss.\n");
7944 return -ENOENT;
7945 }
7946
7947 dev_err(&hdev->pdev->dev,
7948 "lookup mac addr failed for undefined, code=%u.\n",
7949 resp_code);
7950 return -EIO;
7951 }
7952
7953 dev_err(&hdev->pdev->dev,
7954 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7955
7956 return -EINVAL;
7957}
7958
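/* Set or clear the bit for the given function id in the mac/vlan table
 * descriptor bitmap. Ids below HCLGE_VF_NUM_IN_FIRST_DESC live in
 * desc[1]; the remainder spill over into desc[2].
 */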
7959static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7960{
7961#define HCLGE_VF_NUM_IN_FIRST_DESC 192
7962
7963 unsigned int word_num;
7964 unsigned int bit_num;
7965
7966 if (vfid > 255 || vfid < 0)
7967 return -EIO;
7968
	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7970 word_num = vfid / 32;
7971 bit_num = vfid % 32;
7972 if (clr)
7973 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7974 else
7975 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7976 } else {
7977 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7978 bit_num = vfid % 32;
7979 if (clr)
7980 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7981 else
7982 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7983 }
7984
7985 return 0;
7986}
7987
7988static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7989{
7990#define HCLGE_DESC_NUMBER 3
7991#define HCLGE_FUNC_NUMBER_PER_DESC 6
7992 int i, j;
7993
7994 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7995 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7996 if (desc[i].data[j])
7997 return false;
7998
7999 return true;
8000}
8001
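/* Pack the 6-byte MAC address into the hi32/lo16 little-endian layout
 * used by mac/vlan table entries, and mark multicast entries as such.
 */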
8002static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8003 const u8 *addr, bool is_mc)
8004{
8005 const unsigned char *mac_addr = addr;
8006 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8007 (mac_addr[0]) | (mac_addr[1] << 8);
8008 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8009
8010 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8011 if (is_mc) {
8012 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8013 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8014 }
8015
8016 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8017 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8018}
8019
8020static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8021 struct hclge_mac_vlan_tbl_entry_cmd *req)
8022{
8023 struct hclge_dev *hdev = vport->back;
8024 struct hclge_desc desc;
8025 u8 resp_code;
8026 u16 retval;
8027 int ret;
8028
8029 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8030
8031 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8032
8033 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8034 if (ret) {
8035 dev_err(&hdev->pdev->dev,
8036 "del mac addr failed for cmd_send, ret =%d.\n",
8037 ret);
8038 return ret;
8039 }
8040 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8041 retval = le16_to_cpu(desc.retval);
8042
8043 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8044 HCLGE_MAC_VLAN_REMOVE);
8045}
8046
8047static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8048 struct hclge_mac_vlan_tbl_entry_cmd *req,
8049 struct hclge_desc *desc,
8050 bool is_mc)
8051{
8052 struct hclge_dev *hdev = vport->back;
8053 u8 resp_code;
8054 u16 retval;
8055 int ret;
8056
8057 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8058 if (is_mc) {
8059 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8060 memcpy(desc[0].data,
8061 req,
8062 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8063 hclge_cmd_setup_basic_desc(&desc[1],
8064 HCLGE_OPC_MAC_VLAN_ADD,
8065 true);
8066 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8067 hclge_cmd_setup_basic_desc(&desc[2],
8068 HCLGE_OPC_MAC_VLAN_ADD,
8069 true);
8070 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8071 } else {
8072 memcpy(desc[0].data,
8073 req,
8074 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8075 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8076 }
8077 if (ret) {
8078 dev_err(&hdev->pdev->dev,
8079 "lookup mac addr failed for cmd_send, ret =%d.\n",
8080 ret);
8081 return ret;
8082 }
8083 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8084 retval = le16_to_cpu(desc[0].retval);
8085
8086 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8087 HCLGE_MAC_VLAN_LKUP);
8088}
8089
8090static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8091 struct hclge_mac_vlan_tbl_entry_cmd *req,
8092 struct hclge_desc *mc_desc)
8093{
8094 struct hclge_dev *hdev = vport->back;
8095 int cfg_status;
8096 u8 resp_code;
8097 u16 retval;
8098 int ret;
8099
8100 if (!mc_desc) {
8101 struct hclge_desc desc;
8102
8103 hclge_cmd_setup_basic_desc(&desc,
8104 HCLGE_OPC_MAC_VLAN_ADD,
8105 false);
8106 memcpy(desc.data, req,
8107 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8108 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8109 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8110 retval = le16_to_cpu(desc.retval);
8111
8112 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8113 resp_code,
8114 HCLGE_MAC_VLAN_ADD);
8115 } else {
8116 hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8117 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8118 hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8119 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8120 hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8121 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8122 memcpy(mc_desc[0].data, req,
8123 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8124 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8125 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8126 retval = le16_to_cpu(mc_desc[0].retval);
8127
8128 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8129 resp_code,
8130 HCLGE_MAC_VLAN_ADD);
8131 }
8132
8133 if (ret) {
8134 dev_err(&hdev->pdev->dev,
8135 "add mac addr failed for cmd_send, ret =%d.\n",
8136 ret);
8137 return ret;
8138 }
8139
8140 return cfg_status;
8141}
8142
8143static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8144 u16 *allocated_size)
8145{
8146 struct hclge_umv_spc_alc_cmd *req;
8147 struct hclge_desc desc;
8148 int ret;
8149
8150 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8152
8153 req->space_size = cpu_to_le32(space_size);
8154
8155 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8156 if (ret) {
8157 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8158 ret);
8159 return ret;
8160 }
8161
8162 *allocated_size = le32_to_cpu(desc.data[1]);
8163
8164 return 0;
8165}
8166
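/* Request the wanted umv (unicast mac vlan) space from firmware and
 * split what was actually granted: the PF and each VF get an equal
 * private quota, and the shared pool starts at one quota plus the
 * division remainder.
 */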
8167static int hclge_init_umv_space(struct hclge_dev *hdev)
8168{
8169 u16 allocated_size = 0;
8170 int ret;
8171
8172 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8173 if (ret)
8174 return ret;
8175
8176 if (allocated_size < hdev->wanted_umv_size)
8177 dev_warn(&hdev->pdev->dev,
8178 "failed to alloc umv space, want %u, get %u\n",
8179 hdev->wanted_umv_size, allocated_size);
8180
8181 hdev->max_umv_size = allocated_size;
8182 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8183 hdev->share_umv_size = hdev->priv_umv_size +
8184 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8185
8186 if (hdev->ae_dev->dev_specs.mc_mac_size)
8187 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8188
8189 return 0;
8190}
8191
8192static void hclge_reset_umv_space(struct hclge_dev *hdev)
8193{
8194 struct hclge_vport *vport;
8195 int i;
8196
8197 for (i = 0; i < hdev->num_alloc_vport; i++) {
8198 vport = &hdev->vport[i];
8199 vport->used_umv_num = 0;
8200 }
8201
8202 mutex_lock(&hdev->vport_lock);
8203 hdev->share_umv_size = hdev->priv_umv_size +
8204 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8205 mutex_unlock(&hdev->vport_lock);
8206
8207 hdev->used_mc_mac_num = 0;
8208}
8209
8210static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8211{
8212 struct hclge_dev *hdev = vport->back;
8213 bool is_full;
8214
8215 if (need_lock)
8216 mutex_lock(&hdev->vport_lock);
8217
8218 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8219 hdev->share_umv_size == 0);
8220
8221 if (need_lock)
8222 mutex_unlock(&hdev->vport_lock);
8223
8224 return is_full;
8225}
8226
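/* Account a umv entry against the vport's private quota first; once the
 * private quota is used up, further entries consume the shared pool.
 * Freeing reverses the order. Caller must hold hdev->vport_lock.
 */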
8227static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8228{
8229 struct hclge_dev *hdev = vport->back;
8230
8231 if (is_free) {
8232 if (vport->used_umv_num > hdev->priv_umv_size)
8233 hdev->share_umv_size++;
8234
8235 if (vport->used_umv_num > 0)
8236 vport->used_umv_num--;
8237 } else {
8238 if (vport->used_umv_num >= hdev->priv_umv_size &&
8239 hdev->share_umv_size > 0)
8240 hdev->share_umv_size--;
8241 vport->used_umv_num++;
8242 }
8243}
8244
8245static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8246 const u8 *mac_addr)
8247{
8248 struct hclge_mac_node *mac_node, *tmp;
8249
8250 list_for_each_entry_safe(mac_node, tmp, list, node)
8251 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8252 return mac_node;
8253
8254 return NULL;
8255}
8256
8257static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8258 enum HCLGE_MAC_NODE_STATE state)
8259{
8260 switch (state) {
	/* an entry pending deletion is still present in hardware, so
	 * re-adding it simply makes it ACTIVE again
	 */
8262 case HCLGE_MAC_TO_ADD:
8263 if (mac_node->state == HCLGE_MAC_TO_DEL)
8264 mac_node->state = HCLGE_MAC_ACTIVE;
8265 break;
	/* an entry pending addition was never written to hardware, so
	 * it can be dropped directly; otherwise mark it for deletion
	 */
8267 case HCLGE_MAC_TO_DEL:
8268 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8269 list_del(&mac_node->node);
8270 kfree(mac_node);
8271 } else {
8272 mac_node->state = HCLGE_MAC_TO_DEL;
8273 }
8274 break;
8275
	/* marking an entry ACTIVE only matters if it was pending
	 * addition; an entry pending deletion stays TO_DEL
	 */
8278 case HCLGE_MAC_ACTIVE:
8279 if (mac_node->state == HCLGE_MAC_TO_ADD)
8280 mac_node->state = HCLGE_MAC_ACTIVE;
8281
8282 break;
8283 }
8284}
8285
8286int hclge_update_mac_list(struct hclge_vport *vport,
8287 enum HCLGE_MAC_NODE_STATE state,
8288 enum HCLGE_MAC_ADDR_TYPE mac_type,
8289 const unsigned char *addr)
8290{
8291 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8292 struct hclge_dev *hdev = vport->back;
8293 struct hclge_mac_node *mac_node;
8294 struct list_head *list;
8295
8296 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8297 &vport->uc_mac_list : &vport->mc_mac_list;
8298
8299 spin_lock_bh(&vport->mac_list_lock);
8300
	/* if the mac addr is already in the mac list, no need to add a new
	 * one into it, just check the mac addr state, convert it to a new
	 * state, or just remove it, or do nothing.
	 */
8305 mac_node = hclge_find_mac_node(list, addr);
8306 if (mac_node) {
8307 hclge_update_mac_node(mac_node, state);
8308 spin_unlock_bh(&vport->mac_list_lock);
8309 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8310 return 0;
8311 }
8312
	/* if this address is never added, unnecessary to delete */
8314 if (state == HCLGE_MAC_TO_DEL) {
8315 spin_unlock_bh(&vport->mac_list_lock);
8316 hnae3_format_mac_addr(format_mac_addr, addr);
8317 dev_err(&hdev->pdev->dev,
8318 "failed to delete address %s from mac list\n",
8319 format_mac_addr);
8320 return -ENOENT;
8321 }
8322
8323 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8324 if (!mac_node) {
8325 spin_unlock_bh(&vport->mac_list_lock);
8326 return -ENOMEM;
8327 }
8328
8329 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8330
8331 mac_node->state = state;
8332 ether_addr_copy(mac_node->mac_addr, addr);
8333 list_add_tail(&mac_node->node, list);
8334
8335 spin_unlock_bh(&vport->mac_list_lock);
8336
8337 return 0;
8338}
8339
8340static int hclge_add_uc_addr(struct hnae3_handle *handle,
8341 const unsigned char *addr)
8342{
8343 struct hclge_vport *vport = hclge_get_vport(handle);
8344
8345 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8346 addr);
8347}
8348
8349int hclge_add_uc_addr_common(struct hclge_vport *vport,
8350 const unsigned char *addr)
8351{
8352 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8353 struct hclge_dev *hdev = vport->back;
8354 struct hclge_mac_vlan_tbl_entry_cmd req;
8355 struct hclge_desc desc;
8356 u16 egress_port = 0;
8357 int ret;
8358
	/* mac addr check */
8360 if (is_zero_ether_addr(addr) ||
8361 is_broadcast_ether_addr(addr) ||
8362 is_multicast_ether_addr(addr)) {
8363 hnae3_format_mac_addr(format_mac_addr, addr);
8364 dev_err(&hdev->pdev->dev,
8365 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8366 format_mac_addr, is_zero_ether_addr(addr),
8367 is_broadcast_ether_addr(addr),
8368 is_multicast_ether_addr(addr));
8369 return -EINVAL;
8370 }
8371
8372 memset(&req, 0, sizeof(req));
8373
8374 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8375 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8376
8377 req.egress_port = cpu_to_le16(egress_port);
8378
8379 hclge_prepare_mac_addr(&req, addr, false);
8380
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry is inexistent. Repeated unicast entry
	 * is not allowed in the mac vlan table.
	 */
8385 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8386 if (ret == -ENOENT) {
8387 mutex_lock(&hdev->vport_lock);
8388 if (!hclge_is_umv_space_full(vport, false)) {
8389 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8390 if (!ret)
8391 hclge_update_umv_space(vport, false);
8392 mutex_unlock(&hdev->vport_lock);
8393 return ret;
8394 }
8395 mutex_unlock(&hdev->vport_lock);
8396
8397 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8398 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8399 hdev->priv_umv_size);
8400
8401 return -ENOSPC;
8402 }
8403
	/* check if we just hit the duplicate */
8405 if (!ret)
8406 return -EEXIST;
8407
8408 return ret;
8409}
8410
8411static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8412 const unsigned char *addr)
8413{
8414 struct hclge_vport *vport = hclge_get_vport(handle);
8415
8416 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8417 addr);
8418}
8419
8420int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8421 const unsigned char *addr)
8422{
8423 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8424 struct hclge_dev *hdev = vport->back;
8425 struct hclge_mac_vlan_tbl_entry_cmd req;
8426 int ret;
8427
	/* mac addr check */
8429 if (is_zero_ether_addr(addr) ||
8430 is_broadcast_ether_addr(addr) ||
8431 is_multicast_ether_addr(addr)) {
8432 hnae3_format_mac_addr(format_mac_addr, addr);
8433 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8434 format_mac_addr);
8435 return -EINVAL;
8436 }
8437
8438 memset(&req, 0, sizeof(req));
8439 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8440 hclge_prepare_mac_addr(&req, addr, false);
8441 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8442 if (!ret || ret == -ENOENT) {
8443 mutex_lock(&hdev->vport_lock);
8444 hclge_update_umv_space(vport, true);
8445 mutex_unlock(&hdev->vport_lock);
8446 return 0;
8447 }
8448
8449 return ret;
8450}
8451
8452static int hclge_add_mc_addr(struct hnae3_handle *handle,
8453 const unsigned char *addr)
8454{
8455 struct hclge_vport *vport = hclge_get_vport(handle);
8456
8457 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8458 addr);
8459}
8460
8461int hclge_add_mc_addr_common(struct hclge_vport *vport,
8462 const unsigned char *addr)
8463{
8464 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8465 struct hclge_dev *hdev = vport->back;
8466 struct hclge_mac_vlan_tbl_entry_cmd req;
8467 struct hclge_desc desc[3];
8468 bool is_new_addr = false;
8469 int status;
8470
	/* mac addr check */
8472 if (!is_multicast_ether_addr(addr)) {
8473 hnae3_format_mac_addr(format_mac_addr, addr);
8474 dev_err(&hdev->pdev->dev,
8475 "Add mc mac err! invalid mac:%s.\n",
8476 format_mac_addr);
8477 return -EINVAL;
8478 }
8479 memset(&req, 0, sizeof(req));
8480 hclge_prepare_mac_addr(&req, addr, true);
8481 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8482 if (status) {
8483 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8484 hdev->used_mc_mac_num >=
8485 hdev->ae_dev->dev_specs.mc_mac_size)
8486 goto err_no_space;
8487
8488 is_new_addr = true;
8489
		/* This mac addr do not exist, add new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
8494 }
8495 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8496 if (status)
8497 return status;
8498 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8499 if (status == -ENOSPC)
8500 goto err_no_space;
8501 else if (!status && is_new_addr)
8502 hdev->used_mc_mac_num++;
8503
8504 return status;
8505
8506err_no_space:
	/* if already overflow, not to print each time */
8508 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8509 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8510 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8511 }
8512
8513 return -ENOSPC;
8514}
8515
8516static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8517 const unsigned char *addr)
8518{
8519 struct hclge_vport *vport = hclge_get_vport(handle);
8520
8521 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8522 addr);
8523}
8524
8525int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8526 const unsigned char *addr)
8527{
8528 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8529 struct hclge_dev *hdev = vport->back;
8530 struct hclge_mac_vlan_tbl_entry_cmd req;
8531 enum hclge_comm_cmd_status status;
8532 struct hclge_desc desc[3];
8533
	/* mac addr check */
8535 if (!is_multicast_ether_addr(addr)) {
8536 hnae3_format_mac_addr(format_mac_addr, addr);
8537 dev_dbg(&hdev->pdev->dev,
8538 "Remove mc mac err! invalid mac:%s.\n",
8539 format_mac_addr);
8540 return -EINVAL;
8541 }
8542
8543 memset(&req, 0, sizeof(req));
8544 hclge_prepare_mac_addr(&req, addr, true);
8545 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8546 if (!status) {
		/* This mac addr exist, remove this handle's VFID for it */
8548 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8549 if (status)
8550 return status;
8551
8552 if (hclge_is_all_function_id_zero(desc)) {
			/* All the vfid is zero, so need to delete this entry */
8554 status = hclge_remove_mac_vlan_tbl(vport, &req);
8555 if (!status)
8556 hdev->used_mc_mac_num--;
8557 } else {
			/* Not all the vfid is zero, update the vfid */
8559 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8560 }
8561 } else if (status == -ENOENT) {
8562 status = 0;
8563 }
8564
8565 return status;
8566}
8567
8568static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8569 struct list_head *list,
8570 enum HCLGE_MAC_ADDR_TYPE mac_type)
8571{
8572 int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
8573 struct hclge_mac_node *mac_node, *tmp;
8574 int ret;
8575
8576 if (mac_type == HCLGE_MAC_ADDR_UC)
8577 sync = hclge_add_uc_addr_common;
8578 else
8579 sync = hclge_add_mc_addr_common;
8580
8581 list_for_each_entry_safe(mac_node, tmp, list, node) {
8582 ret = sync(vport, mac_node->mac_addr);
8583 if (!ret) {
8584 mac_node->state = HCLGE_MAC_ACTIVE;
8585 } else {
8586 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8587 &vport->state);
8588
			/* If one unicast mac address is existing in hardware,
			 * we need to try whether other unicast mac addresses
			 * are new addresses that can be added.
			 * Multicast mac address can be reusable, even though
			 * there is no space to add new multicast mac address,
			 * we should check whether other mac addresses are
			 * existing in hardware for reuse.
			 */
8597 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
8598 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
8599 break;
8600 }
8601 }
8602}
8603
8604static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8605 struct list_head *list,
8606 enum HCLGE_MAC_ADDR_TYPE mac_type)
8607{
8608 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8609 struct hclge_mac_node *mac_node, *tmp;
8610 int ret;
8611
8612 if (mac_type == HCLGE_MAC_ADDR_UC)
8613 unsync = hclge_rm_uc_addr_common;
8614 else
8615 unsync = hclge_rm_mc_addr_common;
8616
8617 list_for_each_entry_safe(mac_node, tmp, list, node) {
8618 ret = unsync(vport, mac_node->mac_addr);
8619 if (!ret || ret == -ENOENT) {
8620 list_del(&mac_node->node);
8621 kfree(mac_node);
8622 } else {
8623 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8624 &vport->state);
8625 break;
8626 }
8627 }
8628}
8629
8630static bool hclge_sync_from_add_list(struct list_head *add_list,
8631 struct list_head *mac_list)
8632{
8633 struct hclge_mac_node *mac_node, *tmp, *new_node;
8634 bool all_added = true;
8635
8636 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8637 if (mac_node->state == HCLGE_MAC_TO_ADD)
8638 all_added = false;
8639
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means have received a TO_DEL request
		 * during the time window of adding the mac address into mac
		 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
		 * then it will be removed at next time. else it must be TO_ADD,
		 * this address hasn't been added into mac table,
		 * so just remove the mac node.
		 */
8648 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8649 if (new_node) {
8650 hclge_update_mac_node(new_node, mac_node->state);
8651 list_del(&mac_node->node);
8652 kfree(mac_node);
8653 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8654 mac_node->state = HCLGE_MAC_TO_DEL;
8655 list_move_tail(&mac_node->node, mac_list);
8656 } else {
8657 list_del(&mac_node->node);
8658 kfree(mac_node);
8659 }
8660 }
8661
8662 return all_added;
8663}
8664
8665static void hclge_sync_from_del_list(struct list_head *del_list,
8666 struct list_head *mac_list)
8667{
8668 struct hclge_mac_node *mac_node, *tmp, *new_node;
8669
8670 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8671 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8672 if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * received a new TO_ADD request during the time window
			 * of deleting the mac addr, so change it back to
			 * ACTIVE; the entry is still present in hardware.
			 */
8680 new_node->state = HCLGE_MAC_ACTIVE;
8681 list_del(&mac_node->node);
8682 kfree(mac_node);
8683 } else {
8684 list_move_tail(&mac_node->node, mac_list);
8685 }
8686 }
8687}
8688
8689static void hclge_update_overflow_flags(struct hclge_vport *vport,
8690 enum HCLGE_MAC_ADDR_TYPE mac_type,
8691 bool is_all_added)
8692{
8693 if (mac_type == HCLGE_MAC_ADDR_UC) {
8694 if (is_all_added)
8695 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8696 else
8697 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8698 } else {
8699 if (is_all_added)
8700 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8701 else
8702 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8703 }
8704}
8705
8706static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8707 enum HCLGE_MAC_ADDR_TYPE mac_type)
8708{
8709 struct hclge_mac_node *mac_node, *tmp, *new_node;
8710 struct list_head tmp_add_list, tmp_del_list;
8711 struct list_head *list;
8712 bool all_added;
8713
8714 INIT_LIST_HEAD(&tmp_add_list);
8715 INIT_LIST_HEAD(&tmp_del_list);
8716
	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
8720 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8721 &vport->uc_mac_list : &vport->mc_mac_list;
8722
8723 spin_lock_bh(&vport->mac_list_lock);
8724
8725 list_for_each_entry_safe(mac_node, tmp, list, node) {
8726 switch (mac_node->state) {
8727 case HCLGE_MAC_TO_DEL:
8728 list_move_tail(&mac_node->node, &tmp_del_list);
8729 break;
8730 case HCLGE_MAC_TO_ADD:
8731 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8732 if (!new_node)
8733 goto stop_traverse;
8734 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8735 new_node->state = mac_node->state;
8736 list_add_tail(&new_node->node, &tmp_add_list);
8737 break;
8738 default:
8739 break;
8740 }
8741 }
8742
8743stop_traverse:
8744 spin_unlock_bh(&vport->mac_list_lock);
8745
	/* delete first, in order to get max mac table space for adding */
8747 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8748 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
8749
	/* if some mac addresses were added/deleted fail, move back to the
	 * mac_list, and retry at next time.
	 */
8753 spin_lock_bh(&vport->mac_list_lock);
8754
8755 hclge_sync_from_del_list(&tmp_del_list, list);
8756 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8757
8758 spin_unlock_bh(&vport->mac_list_lock);
8759
8760 hclge_update_overflow_flags(vport, mac_type, all_added);
8761}
8762
8763static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8764{
8765 struct hclge_dev *hdev = vport->back;
8766
8767 if (test_bit(vport->vport_id, hdev->vport_config_block))
8768 return false;
8769
8770 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8771 return true;
8772
8773 return false;
8774}
8775
8776static void hclge_sync_mac_table(struct hclge_dev *hdev)
8777{
8778 int i;
8779
8780 for (i = 0; i < hdev->num_alloc_vport; i++) {
8781 struct hclge_vport *vport = &hdev->vport[i];
8782
8783 if (!hclge_need_sync_mac_table(vport))
8784 continue;
8785
8786 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8787 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8788 }
8789}
8790
8791static void hclge_build_del_list(struct list_head *list,
8792 bool is_del_list,
8793 struct list_head *tmp_del_list)
8794{
8795 struct hclge_mac_node *mac_cfg, *tmp;
8796
8797 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8798 switch (mac_cfg->state) {
8799 case HCLGE_MAC_TO_DEL:
8800 case HCLGE_MAC_ACTIVE:
8801 list_move_tail(&mac_cfg->node, tmp_del_list);
8802 break;
8803 case HCLGE_MAC_TO_ADD:
8804 if (is_del_list) {
8805 list_del(&mac_cfg->node);
8806 kfree(mac_cfg);
8807 }
8808 break;
8809 }
8810 }
8811}
8812
8813static void hclge_unsync_del_list(struct hclge_vport *vport,
8814 int (*unsync)(struct hclge_vport *vport,
8815 const unsigned char *addr),
8816 bool is_del_list,
8817 struct list_head *tmp_del_list)
8818{
8819 struct hclge_mac_node *mac_cfg, *tmp;
8820 int ret;
8821
8822 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8823 ret = unsync(vport, mac_cfg->mac_addr);
8824 if (!ret || ret == -ENOENT) {
			/* clear all mac addr from hardware, but remain these
			 * mac addr in the mac list, and restore them after
			 * vf reset finished.
			 */
8829 if (!is_del_list &&
8830 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8831 mac_cfg->state = HCLGE_MAC_TO_ADD;
8832 } else {
8833 list_del(&mac_cfg->node);
8834 kfree(mac_cfg);
8835 }
8836 } else if (is_del_list) {
8837 mac_cfg->state = HCLGE_MAC_TO_DEL;
8838 }
8839 }
8840}
8841
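/* Remove the vport's mac addrs from hardware. With is_del_list the
 * software list entries are dropped as well; otherwise they are kept
 * (and the vport marked for config restore) so they can be re-added
 * after a VF reset.
 */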
8842void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8843 enum HCLGE_MAC_ADDR_TYPE mac_type)
8844{
8845 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8846 struct hclge_dev *hdev = vport->back;
8847 struct list_head tmp_del_list, *list;
8848
8849 if (mac_type == HCLGE_MAC_ADDR_UC) {
8850 list = &vport->uc_mac_list;
8851 unsync = hclge_rm_uc_addr_common;
8852 } else {
8853 list = &vport->mc_mac_list;
8854 unsync = hclge_rm_mc_addr_common;
8855 }
8856
8857 INIT_LIST_HEAD(&tmp_del_list);
8858
8859 if (!is_del_list)
8860 set_bit(vport->vport_id, hdev->vport_config_block);
8861
8862 spin_lock_bh(&vport->mac_list_lock);
8863
8864 hclge_build_del_list(list, is_del_list, &tmp_del_list);
8865
8866 spin_unlock_bh(&vport->mac_list_lock);
8867
8868 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8869
8870 spin_lock_bh(&vport->mac_list_lock);
8871
8872 hclge_sync_from_del_list(&tmp_del_list, list);
8873
8874 spin_unlock_bh(&vport->mac_list_lock);
8875}
8876
8878static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8879 enum HCLGE_MAC_ADDR_TYPE mac_type)
8880{
8881 struct hclge_mac_node *mac_node, *tmp;
8882 struct hclge_dev *hdev = vport->back;
8883 struct list_head tmp_del_list, *list;
8884
8885 INIT_LIST_HEAD(&tmp_del_list);
8886
8887 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8888 &vport->uc_mac_list : &vport->mc_mac_list;
8889
8890 spin_lock_bh(&vport->mac_list_lock);
8891
8892 list_for_each_entry_safe(mac_node, tmp, list, node) {
8893 switch (mac_node->state) {
8894 case HCLGE_MAC_TO_DEL:
8895 case HCLGE_MAC_ACTIVE:
8896 list_move_tail(&mac_node->node, &tmp_del_list);
8897 break;
8898 case HCLGE_MAC_TO_ADD:
8899 list_del(&mac_node->node);
8900 kfree(mac_node);
8901 break;
8902 }
8903 }
8904
8905 spin_unlock_bh(&vport->mac_list_lock);
8906
8907 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8908
8909 if (!list_empty(&tmp_del_list))
8910 dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
8912 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8913 vport->vport_id);
8914
8915 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8916 list_del(&mac_node->node);
8917 kfree(mac_node);
8918 }
8919}
8920
8921static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8922{
8923 struct hclge_vport *vport;
8924 int i;
8925
8926 for (i = 0; i < hdev->num_alloc_vport; i++) {
8927 vport = &hdev->vport[i];
8928 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8929 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8930 }
8931}
8932
8933static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8934 u16 cmdq_resp, u8 resp_code)
8935{
8936#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
8937#define HCLGE_ETHERTYPE_ALREADY_ADD 1
8938#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
8939#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
8940
8941 int return_status;
8942
8943 if (cmdq_resp) {
8944 dev_err(&hdev->pdev->dev,
8945 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8946 cmdq_resp);
8947 return -EIO;
8948 }
8949
8950 switch (resp_code) {
8951 case HCLGE_ETHERTYPE_SUCCESS_ADD:
8952 case HCLGE_ETHERTYPE_ALREADY_ADD:
8953 return_status = 0;
8954 break;
8955 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8956 dev_err(&hdev->pdev->dev,
8957 "add mac ethertype failed for manager table overflow.\n");
8958 return_status = -EIO;
8959 break;
8960 case HCLGE_ETHERTYPE_KEY_CONFLICT:
8961 dev_err(&hdev->pdev->dev,
8962 "add mac ethertype failed for key conflict.\n");
8963 return_status = -EIO;
8964 break;
8965 default:
8966 dev_err(&hdev->pdev->dev,
8967 "add mac ethertype failed for undefined, code=%u.\n",
8968 resp_code);
8969 return_status = -EIO;
8970 }
8971
8972 return return_status;
8973}
8974
8975static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8976 u8 *mac_addr)
8977{
8978 struct hclge_vport *vport = hclge_get_vport(handle);
8979 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8980 struct hclge_dev *hdev = vport->back;
8981
8982 vport = hclge_get_vf_vport(hdev, vf);
8983 if (!vport)
8984 return -EINVAL;
8985
8986 hnae3_format_mac_addr(format_mac_addr, mac_addr);
8987 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8988 dev_info(&hdev->pdev->dev,
8989 "Specified MAC(=%s) is same as before, no change committed!\n",
8990 format_mac_addr);
8991 return 0;
8992 }
8993
8994 ether_addr_copy(vport->vf_info.mac, mac_addr);
8995
	/* there is a timewindow for PF to know VF unalive, it may
	 * cause send mailbox fail, but it doesn't matter, VF will
	 * query it when reinit.
	 */
9000 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9001 dev_info(&hdev->pdev->dev,
9002 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
9003 vf, format_mac_addr);
9004 (void)hclge_inform_reset_assert_to_vf(vport);
9005 return 0;
9006 }
9007
9008 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
9009 vf, format_mac_addr);
9010 return 0;
9011}
9012
9013static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9014 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9015{
9016 struct hclge_desc desc;
9017 u8 resp_code;
9018 u16 retval;
9019 int ret;
9020
9021 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9022 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9023
9024 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9025 if (ret) {
9026 dev_err(&hdev->pdev->dev,
9027 "add mac ethertype failed for cmd_send, ret =%d.\n",
9028 ret);
9029 return ret;
9030 }
9031
9032 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9033 retval = le16_to_cpu(desc.retval);
9034
9035 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9036}
9037
9038static int init_mgr_tbl(struct hclge_dev *hdev)
9039{
9040 int ret;
9041 int i;
9042
9043 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9044 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9045 if (ret) {
9046 dev_err(&hdev->pdev->dev,
9047 "add mac ethertype failed, ret =%d.\n",
9048 ret);
9049 return ret;
9050 }
9051 }
9052
9053 return 0;
9054}
9055
9056static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9057{
9058 struct hclge_vport *vport = hclge_get_vport(handle);
9059 struct hclge_dev *hdev = vport->back;
9060
9061 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9062}
9063
9064int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9065 const u8 *old_addr, const u8 *new_addr)
9066{
9067 struct list_head *list = &vport->uc_mac_list;
9068 struct hclge_mac_node *old_node, *new_node;
9069
9070 new_node = hclge_find_mac_node(list, new_addr);
9071 if (!new_node) {
9072 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9073 if (!new_node)
9074 return -ENOMEM;
9075
9076 new_node->state = HCLGE_MAC_TO_ADD;
9077 ether_addr_copy(new_node->mac_addr, new_addr);
9078 list_add(&new_node->node, list);
9079 } else {
9080 if (new_node->state == HCLGE_MAC_TO_DEL)
9081 new_node->state = HCLGE_MAC_ACTIVE;
9082
		/* make sure the new addr is in the list head, avoid dev
		 * addr may be not re-added into mac table for the umv space
		 * limitation after global/imp reset which will clear mac
		 * table by hardware.
		 */
9088 list_move(&new_node->node, list);
9089 }
9090
9091 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9092 old_node = hclge_find_mac_node(list, old_addr);
9093 if (old_node) {
9094 if (old_node->state == HCLGE_MAC_TO_ADD) {
9095 list_del(&old_node->node);
9096 kfree(old_node);
9097 } else {
9098 old_node->state = HCLGE_MAC_TO_DEL;
9099 }
9100 }
9101 }
9102
9103 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9104
9105 return 0;
9106}
9107
9108static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9109 bool is_first)
9110{
9111 const unsigned char *new_addr = (const unsigned char *)p;
9112 struct hclge_vport *vport = hclge_get_vport(handle);
9113 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9114 struct hclge_dev *hdev = vport->back;
9115 unsigned char *old_addr = NULL;
9116 int ret;
9117
	/* mac addr check */
9119 if (is_zero_ether_addr(new_addr) ||
9120 is_broadcast_ether_addr(new_addr) ||
9121 is_multicast_ether_addr(new_addr)) {
9122 hnae3_format_mac_addr(format_mac_addr, new_addr);
9123 dev_err(&hdev->pdev->dev,
9124 "change uc mac err! invalid mac: %s.\n",
9125 format_mac_addr);
9126 return -EINVAL;
9127 }
9128
9129 ret = hclge_pause_addr_cfg(hdev, new_addr);
9130 if (ret) {
9131 dev_err(&hdev->pdev->dev,
9132 "failed to configure mac pause address, ret = %d\n",
9133 ret);
9134 return ret;
9135 }
9136
9137 if (!is_first)
9138 old_addr = hdev->hw.mac.mac_addr;
9139
9140 spin_lock_bh(&vport->mac_list_lock);
9141 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9142 if (ret) {
9143 hnae3_format_mac_addr(format_mac_addr, new_addr);
9144 dev_err(&hdev->pdev->dev,
9145 "failed to change the mac addr:%s, ret = %d\n",
9146 format_mac_addr, ret);
9147 spin_unlock_bh(&vport->mac_list_lock);
9148
9149 if (!is_first)
9150 hclge_pause_addr_cfg(hdev, old_addr);
9151
9152 return ret;
9153 }
9154
	/* we must update dev addr with spin lock protect, preventing dev addr
	 * being removed by set_rx_mode path.
	 */
9157 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9158 spin_unlock_bh(&vport->mac_list_lock);
9159
9160 hclge_task_schedule(hdev, 0);
9161
9162 return 0;
9163}
9164
9165static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9166{
9167 struct mii_ioctl_data *data = if_mii(ifr);
9168
9169 if (!hnae3_dev_phy_imp_supported(hdev))
9170 return -EOPNOTSUPP;
9171
9172 switch (cmd) {
9173 case SIOCGMIIPHY:
9174 data->phy_id = hdev->hw.mac.phy_addr;
		/* this command reads phy id and register at the same time */
9176 fallthrough;
9177 case SIOCGMIIREG:
9178 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9179 return 0;
9180
9181 case SIOCSMIIREG:
9182 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9183 default:
9184 return -EOPNOTSUPP;
9185 }
9186}
9187
9188static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9189 int cmd)
9190{
9191 struct hclge_vport *vport = hclge_get_vport(handle);
9192 struct hclge_dev *hdev = vport->back;
9193
9194 switch (cmd) {
9195 case SIOCGHWTSTAMP:
9196 return hclge_ptp_get_cfg(hdev, ifr);
9197 case SIOCSHWTSTAMP:
9198 return hclge_ptp_set_cfg(hdev, ifr);
9199 default:
9200 if (!hdev->hw.mac.phydev)
9201 return hclge_mii_ioctl(hdev, ifr, cmd);
9202 }
9203
9204 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9205}
9206
9207static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9208 bool bypass_en)
9209{
9210 struct hclge_port_vlan_filter_bypass_cmd *req;
9211 struct hclge_desc desc;
9212 int ret;
9213
9214 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9215 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9216 req->vf_id = vf_id;
9217 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9218 bypass_en ? 1 : 0);
9219
9220 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9221 if (ret)
9222 dev_err(&hdev->pdev->dev,
9223 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9224 vf_id, ret);
9225
9226 return ret;
9227}
9228
9229static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9230 u8 fe_type, bool filter_en, u8 vf_id)
9231{
9232 struct hclge_vlan_filter_ctrl_cmd *req;
9233 struct hclge_desc desc;
9234 int ret;
9235
	/* read current vlan filter parameter */
9237 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9238 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9239 req->vlan_type = vlan_type;
9240 req->vf_id = vf_id;
9241
9242 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9243 if (ret) {
9244 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9245 vf_id, ret);
9246 return ret;
9247 }
9248
	/* modify and write new config parameter */
9250 hclge_comm_cmd_reuse_desc(&desc, false);
9251 req->vlan_fe = filter_en ?
9252 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9253
9254 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9255 if (ret)
9256 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9257 vf_id, ret);
9258
9259 return ret;
9260}
9261
9262static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9263{
9264 struct hclge_dev *hdev = vport->back;
9265 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9266 int ret;
9267
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9269 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9270 HCLGE_FILTER_FE_EGRESS_V1_B,
9271 enable, vport->vport_id);
9272
9273 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9274 HCLGE_FILTER_FE_EGRESS, enable,
9275 vport->vport_id);
9276 if (ret)
9277 return ret;
9278
9279 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9280 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9281 !enable);
9282 } else if (!vport->vport_id) {
9283 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9284 enable = false;
9285
9286 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9287 HCLGE_FILTER_FE_INGRESS,
9288 enable, 0);
9289 }
9290
9291 return ret;
9292}
9293
9294static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9295{
9296 struct hnae3_handle *handle = &vport->nic;
9297 struct hclge_vport_vlan_cfg *vlan, *tmp;
9298 struct hclge_dev *hdev = vport->back;
9299
9300 if (vport->vport_id) {
9301 if (vport->port_base_vlan_cfg.state !=
9302 HNAE3_PORT_BASE_VLAN_DISABLE)
9303 return true;
9304
9305 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9306 return false;
9307 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9308 return false;
9309 }
9310
9311 if (!vport->req_vlan_fltr_en)
9312 return false;
9313
	/* compatible with former device, always enable vlan filter */
9315 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9316 return true;
9317
9318 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9319 if (vlan->vlan_id != 0)
9320 return true;
9321
9322 return false;
9323}
9324
9325int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9326{
9327 struct hclge_dev *hdev = vport->back;
9328 bool need_en;
9329 int ret;
9330
9331 mutex_lock(&hdev->vport_lock);
9332
9333 vport->req_vlan_fltr_en = request_en;
9334
9335 need_en = hclge_need_enable_vport_vlan_filter(vport);
9336 if (need_en == vport->cur_vlan_fltr_en) {
9337 mutex_unlock(&hdev->vport_lock);
9338 return 0;
9339 }
9340
9341 ret = hclge_set_vport_vlan_filter(vport, need_en);
9342 if (ret) {
9343 mutex_unlock(&hdev->vport_lock);
9344 return ret;
9345 }
9346
9347 vport->cur_vlan_fltr_en = need_en;
9348
9349 mutex_unlock(&hdev->vport_lock);
9350
9351 return 0;
9352}
9353
9354static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9355{
9356 struct hclge_vport *vport = hclge_get_vport(handle);
9357
9358 return hclge_enable_vport_vlan_filter(vport, enable);
9359}
9360
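/* The VF vlan filter command carries a bitmap with one bit per function
 * that selects which VFs the vlan is added to or killed from; the bitmap
 * is split across two descriptors.
 */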
9361static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9362 bool is_kill, u16 vlan,
9363 struct hclge_desc *desc)
9364{
9365 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9366 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9367 u8 vf_byte_val;
9368 u8 vf_byte_off;
9369 int ret;
9370
9371 hclge_cmd_setup_basic_desc(&desc[0],
9372 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9373 hclge_cmd_setup_basic_desc(&desc[1],
9374 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9375
9376 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
9377
9378 vf_byte_off = vfid / 8;
9379 vf_byte_val = 1 << (vfid % 8);
9380
9381 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9382 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9383
9384 req0->vlan_id = cpu_to_le16(vlan);
9385 req0->vlan_cfg = is_kill;
9386
9387 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9388 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9389 else
9390 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9391
9392 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9393 if (ret) {
9394 dev_err(&hdev->pdev->dev,
9395 "Send vf vlan command fail, ret =%d.\n",
9396 ret);
9397 return ret;
9398 }
9399
9400 return 0;
9401}
9402
9403static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9404 bool is_kill, struct hclge_desc *desc)
9405{
9406 struct hclge_vlan_filter_vf_cfg_cmd *req;
9407
9408 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9409
9410 if (!is_kill) {
9411#define HCLGE_VF_VLAN_NO_ENTRY 2
9412 if (!req->resp_code || req->resp_code == 1)
9413 return 0;
9414
9415 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9416 set_bit(vfid, hdev->vf_vlan_full);
9417 dev_warn(&hdev->pdev->dev,
9418 "vf vlan table is full, vf vlan filter is disabled\n");
9419 return 0;
9420 }
9421
9422 dev_err(&hdev->pdev->dev,
9423 "Add vf vlan filter fail, ret =%u.\n",
9424 req->resp_code);
9425 } else {
9426#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9427 if (!req->resp_code)
9428 return 0;
9429
		/* vf vlan filter is disabled when vf vlan table is full,
		 * then new vlan id will not be added into vf vlan table.
		 * Just return 0 without warning, avoid massive verbose
		 * print logs when unload.
		 */
9435 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9436 return 0;
9437
9438 dev_err(&hdev->pdev->dev,
9439 "Kill vf vlan filter fail, ret =%u.\n",
9440 req->resp_code);
9441 }
9442
9443 return -EIO;
9444}
9445
9446static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9447 bool is_kill, u16 vlan)
9448{
9449 struct hclge_vport *vport = &hdev->vport[vfid];
9450 struct hclge_desc desc[2];
9451 int ret;
9452
	/* if vf vlan table is full, firmware will close vf vlan filter, it
	 * is unable and unnecessary to add new vlan id to vf vlan filter.
	 * If spoof check is enable, and vf vlan is full, it shouldn't add
	 * new vlan, because tx packets with these vlan id will be dropped.
	 */
9458 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9459 if (vport->vf_info.spoofchk && vlan) {
9460 dev_err(&hdev->pdev->dev,
				"Can't add vlan because spoof check is on and vf vlan table is full\n");
9462 return -EPERM;
9463 }
9464 return 0;
9465 }
9466
9467 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9468 if (ret)
9469 return ret;
9470
9471 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9472}
9473
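/* Program the port vlan filter table HCLGE_VLAN_ID_OFFSET_STEP ids at a
 * time: vlan_offset_160 selects the window, and the bitmap byte/bit pick
 * the vlan id inside it.
 */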
9474static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9475 u16 vlan_id, bool is_kill)
9476{
9477 struct hclge_vlan_filter_pf_cfg_cmd *req;
9478 struct hclge_desc desc;
9479 u8 vlan_offset_byte_val;
9480 u8 vlan_offset_byte;
9481 u8 vlan_offset_160;
9482 int ret;
9483
9484 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9485
9486 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9487 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9488 HCLGE_VLAN_BYTE_SIZE;
9489 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9490
9491 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9492 req->vlan_offset = vlan_offset_160;
9493 req->vlan_cfg = is_kill;
9494 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9495
9496 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9497 if (ret)
9498 dev_err(&hdev->pdev->dev,
9499 "port vlan command, send fail, ret =%d.\n", ret);
9500 return ret;
9501}
9502
9503static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9504 u16 vlan_id, bool is_kill)
9505{
	/* vlan 0 may be added twice when 8021q module is enabled */
9507 if (!is_kill && !vlan_id &&
9508 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9509 return false;
9510
9511 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9512 dev_warn(&hdev->pdev->dev,
9513 "Add port vlan failed, vport %u is already in vlan %u\n",
9514 vport_id, vlan_id);
9515 return false;
9516 }
9517
9518 if (is_kill &&
9519 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9520 dev_warn(&hdev->pdev->dev,
9521 "Delete port vlan failed, vport %u is not in vlan %u\n",
9522 vport_id, vlan_id);
9523 return false;
9524 }
9525
9526 return true;
9527}
9528
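/* Update the per-VF vlan filter first, then the port level filter, which
 * only needs to change when the first vport joins or the last vport
 * leaves the vlan.
 */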
9529static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9530 u16 vport_id, u16 vlan_id,
9531 bool is_kill)
9532{
9533 u16 vport_idx, vport_num = 0;
9534 int ret;
9535
9536 if (is_kill && !vlan_id)
9537 return 0;
9538
9539 if (vlan_id >= VLAN_N_VID)
9540 return -EINVAL;
9541
9542 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9543 if (ret) {
9544 dev_err(&hdev->pdev->dev,
9545 "Set %u vport vlan filter config fail, ret =%d.\n",
9546 vport_id, ret);
9547 return ret;
9548 }
9549
9550 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9551 return 0;
9552
9553 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9554 vport_num++;
9555
9556 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9557 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9558 is_kill);
9559
9560 return ret;
9561}
9562
9563static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9564{
9565 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9566 struct hclge_vport_vtag_tx_cfg_cmd *req;
9567 struct hclge_dev *hdev = vport->back;
9568 struct hclge_desc desc;
9569 u16 bmap_index;
9570 int status;
9571
9572 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9573
9574 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9575 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9576 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9577 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9578 vcfg->accept_tag1 ? 1 : 0);
9579 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9580 vcfg->accept_untag1 ? 1 : 0);
9581 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9582 vcfg->accept_tag2 ? 1 : 0);
9583 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9584 vcfg->accept_untag2 ? 1 : 0);
9585 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9586 vcfg->insert_tag1_en ? 1 : 0);
9587 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9588 vcfg->insert_tag2_en ? 1 : 0);
9589 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9590 vcfg->tag_shift_mode_en ? 1 : 0);
9591 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9592
9593 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9594 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9595 HCLGE_VF_NUM_PER_BYTE;
9596 req->vf_bitmap[bmap_index] =
9597 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9598
9599 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9600 if (status)
9601 dev_err(&hdev->pdev->dev,
9602 "Send port txvlan cfg command fail, ret =%d\n",
9603 status);
9604
9605 return status;
9606}
9607
9608static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9609{
9610 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9611 struct hclge_vport_vtag_rx_cfg_cmd *req;
9612 struct hclge_dev *hdev = vport->back;
9613 struct hclge_desc desc;
9614 u16 bmap_index;
9615 int status;
9616
9617 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9618
9619 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9620 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9621 vcfg->strip_tag1_en ? 1 : 0);
9622 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9623 vcfg->strip_tag2_en ? 1 : 0);
9624 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9625 vcfg->vlan1_vlan_prionly ? 1 : 0);
9626 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9627 vcfg->vlan2_vlan_prionly ? 1 : 0);
9628 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9629 vcfg->strip_tag1_discard_en ? 1 : 0);
9630 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9631 vcfg->strip_tag2_discard_en ? 1 : 0);
9632
9633 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9634 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9635 HCLGE_VF_NUM_PER_BYTE;
9636 req->vf_bitmap[bmap_index] =
9637 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9638
9639 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9640 if (status)
9641 dev_err(&hdev->pdev->dev,
9642 "Send port rxvlan cfg command fail, ret =%d\n",
9643 status);
9644
9645 return status;
9646}
9647
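/* Configure tx/rx vlan tag offload for a vport based on its port based
 * vlan state, which decides whether tag1 is inserted on tx and which
 * tag is stripped or discarded on rx.
 */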
9648static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9649 u16 port_base_vlan_state,
9650 u16 vlan_tag, u8 qos)
9651{
9652 int ret;
9653
9654 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9655 vport->txvlan_cfg.accept_tag1 = true;
9656 vport->txvlan_cfg.insert_tag1_en = false;
9657 vport->txvlan_cfg.default_tag1 = 0;
9658 } else {
9659 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9660
9661 vport->txvlan_cfg.accept_tag1 =
9662 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9663 vport->txvlan_cfg.insert_tag1_en = true;
9664 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9665 vlan_tag;
9666 }
9667
9668 vport->txvlan_cfg.accept_untag1 = true;
9669
	/* accept tag2 and untag2 as default, tag2 insertion is never used
	 * and tag shift mode is enabled
	 */
9674 vport->txvlan_cfg.accept_tag2 = true;
9675 vport->txvlan_cfg.accept_untag2 = true;
9676 vport->txvlan_cfg.insert_tag2_en = false;
9677 vport->txvlan_cfg.default_tag2 = 0;
9678 vport->txvlan_cfg.tag_shift_mode_en = true;
9679
9680 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9681 vport->rxvlan_cfg.strip_tag1_en = false;
9682 vport->rxvlan_cfg.strip_tag2_en =
9683 vport->rxvlan_cfg.rx_vlan_offload_en;
9684 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9685 } else {
9686 vport->rxvlan_cfg.strip_tag1_en =
9687 vport->rxvlan_cfg.rx_vlan_offload_en;
9688 vport->rxvlan_cfg.strip_tag2_en = true;
9689 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9690 }
9691
9692 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9693 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9694 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9695
9696 ret = hclge_set_vlan_tx_offload_cfg(vport);
9697 if (ret)
9698 return ret;
9699
9700 return hclge_set_vlan_rx_offload_cfg(vport);
9701}

static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret = %d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret = %d\n",
			status);

	return status;
}

static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  true, 0);

	/* for revision 0x21 and later, vf vlan filter is per function */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS, true,
						 vport->vport_id);
		if (ret)
			return ret;
		vport->cur_vlan_fltr_en = true;
	}

	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					  HCLGE_FILTER_FE_INGRESS, true, 0);
}

static int hclge_init_vlan_type(struct hclge_dev *hdev)
{
	hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;

	return hclge_set_vlan_protocol_type(hdev);
}

static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
{
	struct hclge_port_base_vlan_config *cfg;
	struct hclge_vport *vport;
	int ret;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		cfg = &vport->port_base_vlan_cfg;

		ret = hclge_vlan_offload_cfg(vport, cfg->state,
					     cfg->vlan_info.vlan_tag,
					     cfg->vlan_info.qos);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	int ret;

	ret = hclge_init_vlan_filter(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_type(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vport_vlan_offload(hdev);
	if (ret)
		return ret;

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

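/* Software shadow of the hardware VLAN filter table. Each vport keeps a
 * vlan_list so entries can be replayed after a reset or a port based VLAN
 * change; hd_tbl_status records whether an entry is currently programmed
 * in hardware.
 */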
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool written_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			mutex_unlock(&hdev->vport_lock);
			return;
		}
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		mutex_unlock(&hdev->vport_lock);
		return;
	}

	vlan->hd_tbl_status = written_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
	mutex_unlock(&hdev->vport_lock);
}

static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret = %d\n",
					ret);

				mutex_unlock(&hdev->vport_lock);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	mutex_unlock(&hdev->vport_lock);

	return 0;
}

static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}
	clear_bit(vport->vport_id, hdev->vf_vlan_full);
	mutex_unlock(&hdev->vport_lock);
}

void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_lock);

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int vf_id;
	int ret;

	/* PF should restore the port base vlan of all vfs */
	for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
		vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
		vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
			    &vport->port_base_vlan_cfg.vlan_info :
			    &vport->port_base_vlan_cfg.old_vlan_info;

		vlan_id = vlan_info->vlan_tag;
		vlan_proto = vlan_info->vlan_proto;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
			ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						       vport->vport_id,
						       vlan_id, false);
			vport->port_base_vlan_cfg.tbl_sta = ret == 0;
		}
	}
}

void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret)
				break;
			vlan->hd_tbl_status = true;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

/* For global reset and imp reset, hardware clears the mac table, so each
 * mac_node in state HCLGE_MAC_ACTIVE is changed to HCLGE_MAC_TO_ADD and
 * restored by the service task after the reset completes. Mac nodes in
 * state HCLGE_MAC_TO_DEL can simply be removed from the list, since the
 * table is empty anyway; they may be added again after reset finishes.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}

static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_port_base_vlan_config(hdev);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
}

static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);

		/* force clear VLAN 0 */
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
		if (ret)
			return ret;
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	vport->port_base_vlan_cfg.tbl_sta = false;

	/* force add VLAN 0 */
	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

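/* A filter update is needed when the tag changes, or for VLAN 0 when the
 * qos moves into or out of 0: tag 0 with qos 0 means "no port based VLAN",
 * so that transition toggles the filter entry even though the tag itself
 * is unchanged.
 */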
static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
					  const struct hclge_vlan_info *old_cfg)
{
	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
		return true;

	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
		return true;

	return false;
}

static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
					   struct hclge_vlan_info *new_info,
					   struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* add new VLAN tag */
	ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
				       vport->vport_id, new_info->vlan_tag,
				       false);
	if (ret)
		return ret;

	/* remove old VLAN tag */
	if (old_info->vlan_tag == 0)
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
					       true, 0);
	else
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       old_info->vlan_tag, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clear vport%u port base vlan %u, ret = %d.\n",
			vport->vport_id, old_info->vlan_tag, ret);

	return ret;
}

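/* Apply a new port based VLAN state to a vport: reprogram the TX/RX tag
 * offloads first, update the hardware filter entries only when
 * hclge_need_update_vlan_filter() says the tag/qos change requires it,
 * then record the new state so it can be restored after a reset.
 */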
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
				     vlan_info->qos);
	if (ret)
		return ret;

	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
		goto out;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
		ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
						      old_vlan_info);
	else
		ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
						       old_vlan_info);
	if (ret)
		return ret;

out:
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
	vport->port_base_vlan_cfg.tbl_sta = true;
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}

static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan, u8 qos)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan && !qos)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan && !qos)
		return HNAE3_PORT_BASE_VLAN_DISABLE;

	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan, qos);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update port base vlan for vf %d, ret = %d\n",
			vfid, ret);
		return ret;
	}

	/* There is a time window in which the PF may not yet know that a VF
	 * is unalive, so pushing the mailbox message may fail; that is
	 * harmless because the VF queries this info again when it reinits.
	 * For DEVICE_VERSION_V3 and later the VF does not need to be told
	 * about the port based VLAN state at all, so only push the info to
	 * alive VFs on older versions.
	 */
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id,
							state, &vlan_info);

	return 0;
}

static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* clear port base vlan for all vf */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		vport = &hdev->vport[vf];
		vlan_info = &vport->port_base_vlan_cfg.vlan_info;

		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan_info->vlan_tag, true);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"failed to clear vf vlan for vf%d, ret = %d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool written_to_tbl = false;
	int ret = 0;

	/* When the device is resetting or reset failed, firmware is unable
	 * to handle mailbox. Just record the vlan id, and remove it after
	 * reset finished.
	 */
	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan is enabled, we use it as the vlan filter
	 * entry. In this case, we don't update the vlan filter table when
	 * the user adds a new vlan or removes an existing one, just update
	 * the vport vlan list. The vlan ids in the vlan list will be written
	 * to the vlan filter table once port base vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		written_to_tbl = true;
	}

	if (!ret) {
		if (!is_kill)
			hclge_add_vport_vlan_table(vport, vlan_id,
						   written_to_tbl);
		else if (is_kill && vlan_id != 0)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id and try to remove it from hw later, to be consistent
		 * with the user's request.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}

	hclge_set_vport_vlan_fltr_change(vport);

	return ret;
}

static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
					&vport->state))
			continue;

		ret = hclge_enable_vport_vlan_filter(vport,
						     vport->req_vlan_fltr_en);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to sync vlan filter state for vport%u, ret = %d\n",
				vport->vport_id, ret);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
			return;
		}
	}
}

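/* Periodic service-task work: retry the VLAN deletions recorded in
 * vlan_del_fail_bmap (they fail while a reset is in progress), bounded by
 * HCLGE_MAX_SYNC_COUNT per invocation, then sync the per-vport VLAN filter
 * enable state.
 */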
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* retry the VLAN deletions that failed earlier, e.g. during reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
			hclge_set_vport_vlan_fltr_change(vport);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}

	hclge_sync_vlan_fltr_state(hdev);
}

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports two layers of VLAN tags */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* a VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* the PF's mps must be greater than or equal to all the VFs' */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret = %d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret = %d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status = %d\n", ret);
		return ret;
	}

	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);

	return 0;
}

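/* Convert a queue id that is local to the handle into the global tqp index
 * used by hardware. ("covert" is a long-standing typo for "convert"; the
 * name is kept as-is since the symbol is also referenced outside this file.)
 */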
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_comm_tqp *tqp;
	struct hnae3_queue *queue;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_comm_tqp, q);

	return tqp->index;
}

static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 reset_try_times = 0;
	u8 reset_status;
	u16 queue_gid;
	int ret;
	u16 i;

	for (i = 0; i < handle->kinfo.num_tqps; i++) {
		queue_gid = hclge_covert_handle_qid_global(handle, i);
		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to send reset tqp cmd, ret = %d\n",
				ret);
			return ret;
		}

		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
			ret = hclge_get_reset_status(hdev, queue_gid,
						     &reset_status);
			if (ret)
				return ret;

			if (reset_status)
				break;

			/* Wait for tqp hw reset */
			usleep_range(1000, 1200);
		}

		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
			dev_err(&hdev->pdev->dev,
				"wait for tqp hw reset timeout\n");
			return -ETIME;
		}

		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to deassert soft reset, ret = %d\n",
				ret);
			return ret;
		}
		reset_try_times = 0;
	}
	return 0;
}

static int hclge_reset_rcb(struct hnae3_handle *handle)
{
#define HCLGE_RESET_RCB_NOT_SUPPORT	0U
#define HCLGE_RESET_RCB_SUCCESS		1U

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_reset_cmd *req;
	struct hclge_desc desc;
	u8 return_status;
	u16 queue_gid;
	int ret;

	queue_gid = hclge_covert_handle_qid_global(handle, 0);

	req = (struct hclge_reset_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to send rcb reset cmd, ret = %d\n", ret);
		return ret;
	}

	return_status = req->fun_reset_rcb_return_status;
	if (return_status == HCLGE_RESET_RCB_SUCCESS)
		return 0;

	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
			return_status);
		return -EIO;
	}

	/* if the rcb reset cmd is unsupported, we need to send the reset
	 * tqp cmd again to reset all tqps
	 */
	return hclge_reset_tqp_cmd(handle);
}

int hclge_reset_tqp(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* only need to disable PF's tqp */
	if (!vport->vport_id) {
		ret = hclge_tqp_enable(handle, false);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to disable tqp, ret = %d\n", ret);
			return ret;
		}
	}

	return hclge_reset_rcb(handle);
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}

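/* Resolve the pause configuration after PHY autonegotiation: combine the
 * local and link-partner advertisements with mii_resolve_flowctrl_fdx()
 * and program MAC pause accordingly; pause is not used on half duplex.
 */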
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link || !phydev->autoneg)
		return 0;

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
		    hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port info here
	 * before returning the media type, to ensure it is up to date.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
	dev_info(dev, "Default tx spare buffer size: %u\n",
		 hdev->tx_spare_buf_size);

	dev_info(dev, "PF info end.\n");
}

static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hnae3_client *client;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		vport->nic.client = client;
		ret = hclge_init_nic_client_instance(ae_dev, vport);
		if (ret)
			goto clear_nic;

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			vport->roce.client = client;
		}

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];

	if (hdev->roce_client) {
		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
		hdev->roce_client = NULL;
		vport->roce.client = NULL;
	}
	if (client->type == HNAE3_CLIENT_ROCE)
		return;
	if (hdev->nic_client && client->ops->uninit_instance) {
		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		client->ops->uninit_instance(&vport->nic, 0);
		hdev->nic_client = NULL;
		vport->nic.client = NULL;
	}
}

static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* for a device that has no device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->hw.mem_base =
		devm_ioremap_wc(&pdev->dev,
				pci_resource_start(pdev, HCLGE_MEM_BAR),
				pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->hw.mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hw.io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->hw.io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	ret = hclge_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_unmap_io_base:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}

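/* Prepare for a reset requested from outside the reset service task
 * (e.g. FLR): retry the prepare step a few times while a reset is still
 * pending, then mask the misc vector and disable the command queue until
 * hclge_reset_done() rebuilds and re-enables them.
 */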
static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
#define HCLGE_RESET_RETRY_WAIT_MS	500
#define HCLGE_RESET_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclge_reset_prepare(hdev);
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGE_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear vport's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vport(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This command is only supported by new firmware; older firmware
	 * returns an "unsupported" error for it. To keep the code backward
	 * compatible, treat that error as success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}
	return 0;
}

static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports two layers of VLAN tags */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command queue initialize */
	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_devlink_uninit;

	/* Firmware command initialize */
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
	    !hnae3_dev_phy_imp_supported(hdev)) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
				      &hdev->rss_cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on this PF would affect the pending initialization
	 * of other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
	hclge_devlink_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	mutex_destroy(&hdev->vport_lock);
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret = %d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret = %d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}

static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	vport->vf_info.trusted = new_trusted;
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);

	return 0;
}

static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret = %d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset, firmware has already set
		 * it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret = %d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}

11737static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11738{
11739 struct hclge_dev *hdev = ae_dev->priv;
11740 struct pci_dev *pdev = ae_dev->pdev;
11741 int ret;
11742
11743 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11744
11745 hclge_stats_clear(hdev);
11746
11747
11748
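	/* NOTE: only IMP and global resets invalidate the vlan/mac tables
	 * kept in memory; for other reset types they must not be cleaned.
	 */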
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		return ret;

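	/* Log and clear the hw errors those already occurred */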
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

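	/* Re-enable the hw error interrupts because
	 * they get disabled on global reset.
	 */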
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	hclge_init_rxd_adv_layout(hdev);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);
	hclge_ptp_uninit(hdev);
	hclge_uninit_rxd_adv_layout(hdev);
	hclge_uninit_mac_table(hdev);
	hclge_del_all_fd_entries(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

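	/* Disable MISC vector(vector0) */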
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

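	/* Disable all hw interrupts */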
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclge_misc_irq_uninit(hdev);
	hclge_devlink_uninit(hdev);
	hclge_pci_uninit(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}

static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;

	roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = vport->nic.kinfo.rss_size * i;
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_set_rss_tc_mode_cfg(handle);
	if (ret)
		return ret;

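	/* RSS indirection table has been configured by user */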
	if (rxfh_configured)
		goto out;

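	/* Reinitializes the rss indirection table according to the new RSS size */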
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}

static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3

int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

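	/* initialize command BD except the last one */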
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

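	/* initialize the last command BD */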
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}

static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}

static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}

static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}

static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int *bd_num_list;
	u32 data_len;
	int ret;

	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		goto out;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

out:
	kfree(bd_num_list);
	return ret;
}

static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	struct hclge_desc *desc_src;
	int *bd_num_list;
	u32 *reg = data;
	int ret;

	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		goto out;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
out:
	kfree(bd_num_list);
	return ret;
}

static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

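	/* fetching per-PF registers values from PF PCIe register space */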
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
	int regs_lines_32_bit, regs_lines_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}

static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;
	u16 i;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				  &vport->state);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
		}
	}

	for (i = 1; i < hdev->num_alloc_vport; i++) {
		bool uc_en = false;
		bool mc_en = false;
		bool bc_en;

		vport = &hdev->vport[i];

		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
					&vport->state))
			continue;

		if (vport->vf_info.trusted) {
			uc_en = vport->vf_info.request_uc_en > 0 ||
				vport->overflow_promisc_flags &
				HNAE3_OVERFLOW_UPE;
			mc_en = vport->vf_info.request_mc_en > 0 ||
				vport->overflow_promisc_flags &
				HNAE3_OVERFLOW_MPE;
		}
		bc_en = vport->vf_info.request_bc_en > 0;

		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
						 mc_en, bc_en);
		if (ret) {
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
			return;
		}
		hclge_set_vport_vlan_fltr_change(vport);
	}
}

static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

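/* read the eeprom in HCLGE_SFP_INFO_CMD_NUM bds in one reading,
 * return the bytes actually read, 0 means read failed.
 */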
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

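	/* setup all sfp bds */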
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

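		/* all bds except the last one need the next flag */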
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

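	/* setup bd0, this bd contains offset and read length. */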
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

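	/* copy sfp info from bd0 to out buffer. */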
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

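	/* copy sfp info from the remaining bds to out buffer if needed. */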
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			return read_len;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}

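/* After disable sriov, VF still has some config and info need clean,
 * which was configured by PF.
 */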
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

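	/* after disable sriov, clean VF rate configured by PF */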
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}

static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);