1
2
3
4#include "ice_common.h"
5#include "ice_lib.h"
6#include "ice_sched.h"
7#include "ice_adminq_cmd.h"
8#include "ice_flow.h"
9
10#define ICE_PF_RESET_WAIT_COUNT 300
11
12
13
14
15
16
17
18
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure. Devices that do
 * not match a known device ID still succeed with ICE_MAC_UNKNOWN.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}
62
63
64
65
66
67
68
/**
 * ice_is_e810 - determine if the device MAC is E810 based
 * @hw: pointer to the hardware structure
 *
 * Return: true if hw->mac_type was resolved to ICE_MAC_E810 by
 * ice_set_mac_type(), false otherwise.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}
73
74
75
76
77
78
79
80
81enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
82{
83 struct ice_aq_desc desc;
84
85 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
86
87 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
88}
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the MAC addresses
 * @buf_size: size of the buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Reads the per-PF station MAC address(es) via the manage-MAC-read AQ
 * command. On success, the first response entry of LAN type is cached in
 * hw->port_info->mac (both lan_addr and perm_addr) rather than returned
 * to the caller; @buf is interpreted as an array of
 * struct ice_aqc_manage_mac_read_resp entries filled by firmware.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	/* Buffer must hold at least one response entry */
	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* Cache the first LAN-type address reported by firmware */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
147
148
149
150
151
152
153
154
155
156
157
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Queries the PHY capabilities supported on the port via the get-PHY-caps
 * admin command. When @report_mode is ICE_AQC_REPORT_TOPO_CAP_MEDIA and the
 * command succeeds, the reported PHY types and module info are also cached
 * on @pi.
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Default-configuration reporting requires explicit FW support */
	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	/* Dump the response buffer; NOTE(review): these fields are only
	 * meaningful when status == 0 — on failure they may be stale.
	 */
	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	/* Cache topology/media capabilities on the port for later use */
	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
221
222
223
224
225
226
227
228
229
230
231
232
233
234static enum ice_status
235ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
236 struct ice_sq_cd *cd)
237{
238 struct ice_aqc_get_link_topo *cmd;
239 struct ice_aq_desc desc;
240
241 cmd = &desc.params.get_link_topo;
242
243 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
244
245 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
246 ICE_AQC_LINK_TOPO_NODE_CTX_S);
247
248
249 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
250
251 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
252}
253
254
255
256
257
258
259
260
261static bool ice_is_media_cage_present(struct ice_port_info *pi)
262{
263
264
265
266
267 return !ice_aq_get_link_topo_handle(pi,
268 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
269 NULL);
270}
271
272
273
274
275
/**
 * ice_get_media_type - returns media type
 * @pi: port information structure
 *
 * Derives the media type (fiber/BASE-T/DA/backplane) from the port's
 * cached link_info PHY types and module type bytes.
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			/* AUI/CAUI attached via a cage is treated as DA */
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			/* AUI/CAUI attached via a cage is treated as DA */
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
388
389
390
391
392
393
394
395
396
397
/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Queries firmware for the current link status, updates the cached link
 * state on @pi (saving the previous state in link_info_old), derives the
 * media type and flow-control mode, and optionally copies the new state
 * into @link.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
490
491
492
493
494
495
496
497
498
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: set MAC config AQ command buffer to fill
 *
 * Populates the Tx pause timer value and flow-control refresh threshold
 * fields of the set-MAC-config command from the link-flow-control (LFC)
 * entry of the PRTMAC pause quanta / refresh timer register arrays.
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* The LFC values occupy the last index of the per-priority register
	 * arrays; address it via the register's MAX_INDEX.
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the Tx pause quanta for LFC */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the pause refresh timer for LFC */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}
527
528
529
530
531
532
533
534
535
536enum ice_status
537ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
538{
539 struct ice_aqc_set_mac_cfg *cmd;
540 struct ice_aq_desc desc;
541
542 cmd = &desc.params.set_mac_cfg;
543
544 if (max_frame_size == 0)
545 return ICE_ERR_PARAM;
546
547 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
548
549 cmd->max_frame_size = cpu_to_le16(max_frame_size);
550
551 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
552
553 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
554}
555
556
557
558
559
560static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
561{
562 struct ice_switch_info *sw;
563 enum ice_status status;
564
565 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
566 sizeof(*hw->switch_info), GFP_KERNEL);
567 sw = hw->switch_info;
568
569 if (!sw)
570 return ICE_ERR_NO_MEMORY;
571
572 INIT_LIST_HEAD(&sw->vsi_list_map_head);
573
574 status = ice_init_def_sw_recp(hw);
575 if (status) {
576 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
577 return status;
578 }
579 return 0;
580}
581
582
583
584
585
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 *
 * Frees all VSI list map entries, every filter rule hanging off each
 * switch recipe (destroying the per-recipe lock first), any replay rule
 * bookkeeping, and finally the recipe list and switch_info itself.
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
	/* NOTE(review): hw->switch_info is left dangling here — callers must
	 * not reuse it after this function; confirm no double-cleanup path.
	 */
}
615
616
617
618
619
620static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
621{
622 struct ice_aq_desc desc;
623 enum ice_status status;
624 __le16 *config;
625 u16 size;
626
627 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
628 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
629 if (!config)
630 return ICE_ERR_NO_MEMORY;
631
632 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
633
634 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
635 if (!status) {
636 u16 i;
637
638
639 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
640 u16 v, m, flgs;
641
642 v = le16_to_cpu(config[i]);
643 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
644 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
645
646 if (m < ICE_AQC_FW_LOG_ID_MAX)
647 hw->fw_log.evnts[m].cur = flgs;
648 }
649 }
650
651 devm_kfree(ice_hw_to_dev(hw), config);
652
653 return status;
654}
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable the configured FW logging events if true, disable if false
 *
 * Enables or disables FW logging over the AdminQ (cq_en) and/or UART
 * (uart_en) based on the per-module "cfg" values in hw->fw_log.evnts[].
 * Only deltas versus the current FW state (fetched via
 * ice_get_fw_log_cfg()) are sent. On success, the "cur" values are updated
 * to reflect the new state.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	/* Nothing to do if FW logging was never requested at all */
	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disabling is a no-op when nothing is active or the SQ is down */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings so only changed modules are sent */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Collect the modules whose desired config differs from the
		 * current FW state; allocate the delta buffer lazily.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Track whether any event remains requested */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only send the command when at least one event is still
		 * requested; with no deltas there is nothing to update.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* On enable, mirror the sent deltas into "cur"; on disable,
		 * clear "cur" for every module since all logging stops.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* Disabling: zero all cached module states */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
810
811
812
813
814
815
816
817
818
819void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
820{
821 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
822 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
823 le16_to_cpu(desc->datalen));
824 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
825}
826
827
828
829
830
831
832
833
834static void ice_get_itr_intrl_gran(struct ice_hw *hw)
835{
836 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
837 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
838 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
839
840 switch (max_agg_bw) {
841 case ICE_MAX_AGG_BW_200G:
842 case ICE_MAX_AGG_BW_100G:
843 case ICE_MAX_AGG_BW_50G:
844 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
845 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
846 break;
847 case ICE_MAX_AGG_BW_25G:
848 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
849 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
850 break;
851 }
852}
853
854
855
856
857
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 *
 * Brings the device to an operational state: MAC type detection, PF reset,
 * control queues, NVM/capabilities, port/scheduler/switch configuration,
 * filter management, MAC address read, and HW tables. Unwinds everything
 * initialized so far on any failure via the goto labels at the bottom.
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. A failure here is logged but not fatal. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities (non-fatal) */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information; buffer sized for up to two response
	 * entries (e.g. LAN and WoL addresses).
	 */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* NOTE(review): fd_ctr_base is not released if ice_init_hw_tbls()
	 * fails — confirm whether the unwind path should free it.
	 */
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
1013
1014
1015
1016
1017
1018
1019
1020
1021
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * Releases resources in roughly the reverse order of ice_init_hw():
 * FD counters, filter management, scheduler state, package segment and
 * HW tables, port_info, FW logging, control queues, and VSI contexts.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts including keeps */
	ice_clear_all_vsi_ctx(hw);
}
1045
1046
1047
1048
1049
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Polls GLGEN_RSTAT until the device leaves the reset state, then polls
 * GLNVM_ULD until all applicable "reset done" bits are set. Returns
 * ICE_ERR_RESET_FAILED if either poll times out.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	/* The PE_DONE bit only applies when the device supports RDMA */
	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
1102
1103
1104
1105
1106
1107
1108
1109
1110static enum ice_status ice_pf_reset(struct ice_hw *hw)
1111{
1112 u32 cnt, reg;
1113
1114
1115
1116
1117
1118
1119 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1120 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1121
1122 if (ice_check_reset(hw))
1123 return ICE_ERR_RESET_FAILED;
1124
1125 return 0;
1126 }
1127
1128
1129 reg = rd32(hw, PFGEN_CTRL);
1130
1131 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1132
1133
1134
1135
1136
1137 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1138 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1139 reg = rd32(hw, PFGEN_CTRL);
1140 if (!(reg & PFGEN_CTRL_PFSWR_M))
1141 break;
1142
1143 mdelay(1);
1144 }
1145
1146 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1147 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1148 return ICE_ERR_RESET_FAILED;
1149 }
1150
1151 return 0;
1152}
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1167{
1168 u32 val = 0;
1169
1170 switch (req) {
1171 case ICE_RESET_PFR:
1172 return ice_pf_reset(hw);
1173 case ICE_RESET_CORER:
1174 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1175 val = GLGEN_RTRIG_CORER_M;
1176 break;
1177 case ICE_RESET_GLOBR:
1178 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1179 val = GLGEN_RTRIG_GLOBR_M;
1180 break;
1181 default:
1182 return ICE_ERR_PARAM;
1183 }
1184
1185 val |= rd32(hw, GLGEN_RTRIG);
1186 wr32(hw, GLGEN_RTRIG, val);
1187 ice_flush(hw);
1188
1189
1190 return ice_check_reset(hw);
1191}
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201static enum ice_status
1202ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1203{
1204 u8 i;
1205
1206 if (!ice_rxq_ctx)
1207 return ICE_ERR_BAD_PTR;
1208
1209 if (rxq_index > QRX_CTRL_MAX_INDEX)
1210 return ICE_ERR_PARAM;
1211
1212
1213 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1214 wr32(hw, QRX_CONTEXT(i, rxq_index),
1215 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1216
1217 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1218 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1219 }
1220
1221 return 0;
1222}
1223
1224
/* LAN Rx Queue Context layout used to pack struct ice_rlan_ctx into the
 * hardware format. Each entry gives (field, width in bits, LSB offset).
 */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
	ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
	{ 0 }	/* sentinel: terminates the table for ice_set_ctx() */
};
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260enum ice_status
1261ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1262 u32 rxq_index)
1263{
1264 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1265
1266 if (!rlan_ctx)
1267 return ICE_ERR_BAD_PTR;
1268
1269 rlan_ctx->prefena = 1;
1270
1271 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1272 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1273}
1274
1275
/* LAN Tx Queue Context layout used to pack struct ice_tlan_ctx into the
 * hardware format. Each entry gives (field, width in bits, LSB offset).
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
	{ 0 }	/* sentinel: terminates the table for ice_set_ctx() */
};
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319static int
1320ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1321 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1322{
1323 return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
1324 (struct ice_aq_desc *)desc,
1325 buf, buf_size, cd));
1326}
1327
1328
1329
1330
1331
1332
1333int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1334{
1335 struct ice_sbq_cmd_desc desc = {0};
1336 struct ice_sbq_msg_req msg = {0};
1337 u16 msg_len;
1338 int status;
1339
1340 msg_len = sizeof(msg);
1341
1342 msg.dest_dev = in->dest_dev;
1343 msg.opcode = in->opcode;
1344 msg.flags = ICE_SBQ_MSG_FLAGS;
1345 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1346 msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1347 msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1348
1349 if (in->opcode)
1350 msg.data = cpu_to_le32(in->data);
1351 else
1352
1353
1354
1355 msg_len -= sizeof(msg.data);
1356
1357 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
1358 desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1359 desc.param0.cmd_len = cpu_to_le16(msg_len);
1360 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1361 if (!status && !in->opcode)
1362 in->data = le32_to_cpu
1363 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1364 return status;
1365}
1366
1367
1368
1369
1370
1371
1372
/* Serializes AdminQ commands that modify global device configuration.
 * Taken in ice_aq_send_cmd() for every opcode except a handful of
 * package/version commands (and release of the global config lock itself),
 * which are excluded from this software lock.
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);
1374
1375
1376
1377
1378
1379
1380
1381
1382static bool ice_should_retry_sq_send_cmd(u16 opcode)
1383{
1384 switch (opcode) {
1385 case ice_aqc_opc_get_link_topo:
1386 case ice_aqc_opc_lldp_stop:
1387 case ice_aqc_opc_lldp_start:
1388 case ice_aqc_opc_lldp_filter_ctrl:
1389 return true;
1390 }
1391
1392 return false;
1393}
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407static enum ice_status
1408ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1409 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1410 struct ice_sq_cd *cd)
1411{
1412 struct ice_aq_desc desc_cpy;
1413 enum ice_status status;
1414 bool is_cmd_for_retry;
1415 u8 *buf_cpy = NULL;
1416 u8 idx = 0;
1417 u16 opcode;
1418
1419 opcode = le16_to_cpu(desc->opcode);
1420 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1421 memset(&desc_cpy, 0, sizeof(desc_cpy));
1422
1423 if (is_cmd_for_retry) {
1424 if (buf) {
1425 buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1426 if (!buf_cpy)
1427 return ICE_ERR_NO_MEMORY;
1428 }
1429
1430 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1431 }
1432
1433 do {
1434 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1435
1436 if (!is_cmd_for_retry || !status ||
1437 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1438 break;
1439
1440 if (buf_cpy)
1441 memcpy(buf, buf_cpy, buf_size);
1442
1443 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1444
1445 mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1446
1447 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1448
1449 kfree(buf_cpy);
1450
1451 return status;
1452}
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464enum ice_status
1465ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1466 u16 buf_size, struct ice_sq_cd *cd)
1467{
1468 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1469 bool lock_acquired = false;
1470 enum ice_status status;
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480 switch (le16_to_cpu(desc->opcode)) {
1481 case ice_aqc_opc_download_pkg:
1482 case ice_aqc_opc_get_pkg_info_list:
1483 case ice_aqc_opc_get_ver:
1484 break;
1485 case ice_aqc_opc_release_res:
1486 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1487 break;
1488 fallthrough;
1489 default:
1490 mutex_lock(&ice_global_cfg_lock_sw);
1491 lock_acquired = true;
1492 break;
1493 }
1494
1495 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1496 if (lock_acquired)
1497 mutex_unlock(&ice_global_cfg_lock_sw);
1498
1499 return status;
1500}
1501
1502
1503
1504
1505
1506
1507
1508
1509enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1510{
1511 struct ice_aqc_get_ver *resp;
1512 struct ice_aq_desc desc;
1513 enum ice_status status;
1514
1515 resp = &desc.params.get_ver;
1516
1517 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1518
1519 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1520
1521 if (!status) {
1522 hw->fw_branch = resp->fw_branch;
1523 hw->fw_maj_ver = resp->fw_major;
1524 hw->fw_min_ver = resp->fw_minor;
1525 hw->fw_patch = resp->fw_patch;
1526 hw->fw_build = le32_to_cpu(resp->fw_build);
1527 hw->api_branch = resp->api_branch;
1528 hw->api_maj_ver = resp->api_major;
1529 hw->api_min_ver = resp->api_minor;
1530 hw->api_patch = resp->api_patch;
1531 }
1532
1533 return status;
1534}
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544enum ice_status
1545ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1546 struct ice_sq_cd *cd)
1547{
1548 struct ice_aqc_driver_ver *cmd;
1549 struct ice_aq_desc desc;
1550 u16 len;
1551
1552 cmd = &desc.params.driver_ver;
1553
1554 if (!dv)
1555 return ICE_ERR_PARAM;
1556
1557 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1558
1559 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1560 cmd->major_ver = dv->major_ver;
1561 cmd->minor_ver = dv->minor_ver;
1562 cmd->build_ver = dv->build_ver;
1563 cmd->subbuild_ver = dv->subbuild_ver;
1564
1565 len = 0;
1566 while (len < sizeof(dv->driver_string) &&
1567 isascii(dv->driver_string[len]) && dv->driver_string[len])
1568 len++;
1569
1570 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1571}
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1582{
1583 struct ice_aqc_q_shutdown *cmd;
1584 struct ice_aq_desc desc;
1585
1586 cmd = &desc.params.q_shutdown;
1587
1588 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1589
1590 if (unloading)
1591 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1592
1593 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1594}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
/**
 * ice_aq_req_res - request a common resource
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: in - requested hold time in ms; out - time (ms) the current
 *	     owner may still hold the resource, or 0 on hard failure
 * @cd: pointer to command details structure or NULL
 *
 * Requests a common resource from firmware via the admin queue.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	/* default to 0 unless FW reports a usable timeout below */
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The Global Config Lock reports ownership through the response
	 * status field instead of the AQ return code:
	 *  - GLBL_SUCCESS: lock acquired; return 0 with FW timeout
	 *  - GLBL_IN_PROG: another entity holds it; return ICE_ERR_AQ_ERROR
	 *    with the time the caller should keep retrying
	 *  - GLBL_DONE: the global config was already completed elsewhere;
	 *    ICE_ERR_AQ_NO_WORK tells the caller to skip the work
	 *  - anything else: invalid FW response, force an immediate timeout
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* For all other resources: on success or when the resource is busy
	 * (EBUSY), FW's timeout field tells the caller how long the current
	 * owner may keep holding it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691static enum ice_status
1692ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1693 struct ice_sq_cd *cd)
1694{
1695 struct ice_aqc_req_res *cmd;
1696 struct ice_aq_desc desc;
1697
1698 cmd = &desc.params.res_owner;
1699
1700 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1701
1702 cmd->res_id = cpu_to_le16(res);
1703 cmd->res_number = cpu_to_le32(sdp_number);
1704
1705 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1706}
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
/**
 * ice_acquire_res - acquire the ownership of a resource
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: maximum time in ms the caller is willing to wait
 *
 * Polls ice_aq_req_res() every ICE_RES_POLLING_DELAY_MS until the resource
 * is acquired, the wait budget expires, or FW reports there is no work left
 * to do (ICE_ERR_AQ_NO_WORK).
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* ICE_ERR_AQ_NO_WORK means another entity already acquired the
	 * resource and finished any required updates; the caller gets no
	 * ownership and has nothing further to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* Poll, decrementing the local wait budget, while the current owner
	 * (per time_left from FW) still holds the resource.
	 */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* resource free, but nothing left to do */
			break;

		if (!status)
			/* resource acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		/* NO_WORK is only meaningful for write acquisitions; flag it
		 * as unexpected otherwise.
		 */
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
1766
1767
1768
1769
1770
1771
1772
1773
1774void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1775{
1776 enum ice_status status;
1777 u32 total_delay = 0;
1778
1779 status = ice_aq_release_res(hw, res, 0, NULL);
1780
1781
1782
1783
1784 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1785 (total_delay < hw->adminq.sq_cmd_timeout)) {
1786 mdelay(1);
1787 status = ice_aq_release_res(hw, res, 0, NULL);
1788 total_delay++;
1789 }
1790}
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803enum ice_status
1804ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1805 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1806 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1807{
1808 struct ice_aqc_alloc_free_res_cmd *cmd;
1809 struct ice_aq_desc desc;
1810
1811 cmd = &desc.params.sw_res_ctrl;
1812
1813 if (!buf)
1814 return ICE_ERR_PARAM;
1815
1816 if (buf_size < flex_array_size(buf, elem, num_entries))
1817 return ICE_ERR_PARAM;
1818
1819 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1820
1821 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1822
1823 cmd->num_entries = cpu_to_le16(num_entries);
1824
1825 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1826}
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836enum ice_status
1837ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1838{
1839 struct ice_aqc_alloc_free_res_elem *buf;
1840 enum ice_status status;
1841 u16 buf_len;
1842
1843 buf_len = struct_size(buf, elem, num);
1844 buf = kzalloc(buf_len, GFP_KERNEL);
1845 if (!buf)
1846 return ICE_ERR_NO_MEMORY;
1847
1848
1849 buf->num_elems = cpu_to_le16(num);
1850 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1851 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1852 if (btm)
1853 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1854
1855 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1856 ice_aqc_opc_alloc_res, NULL);
1857 if (status)
1858 goto ice_alloc_res_exit;
1859
1860 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1861
1862ice_alloc_res_exit:
1863 kfree(buf);
1864 return status;
1865}
1866
1867
1868
1869
1870
1871
1872
1873
1874enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1875{
1876 struct ice_aqc_alloc_free_res_elem *buf;
1877 enum ice_status status;
1878 u16 buf_len;
1879
1880 buf_len = struct_size(buf, elem, num);
1881 buf = kzalloc(buf_len, GFP_KERNEL);
1882 if (!buf)
1883 return ICE_ERR_NO_MEMORY;
1884
1885
1886 buf->num_elems = cpu_to_le16(num);
1887 buf->res_type = cpu_to_le16(type);
1888 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1889
1890 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1891 ice_aqc_opc_free_res, NULL);
1892 if (status)
1893 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1894
1895 kfree(buf);
1896 return status;
1897}
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1909{
1910 u8 funcs;
1911
1912#define ICE_CAPS_VALID_FUNCS_M 0xFF
1913 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1914 ICE_CAPS_VALID_FUNCS_M);
1915
1916 if (!funcs)
1917 return 0;
1918
1919 return max / funcs;
1920}
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
/**
 * ice_parse_common_caps - parse a capability common to device and function
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure to populate
 * @elem: the capability element to parse
 * @prefix: message prefix ("func caps" or "dev caps") for debug logging
 *
 * Stores the capability's values into @caps if it is one of the capability
 * IDs shared by device and function capability lists.
 *
 * Returns: true if the capability was recognized here, false so the caller
 * can decide whether it was handled by its own (dev/func specific) parser.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		/* only the unified-update bit of this word is consumed */
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042static void
2043ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2044{
2045
2046
2047
2048 if (hw->dev_caps.num_funcs > 4) {
2049
2050 caps->maxtc = 4;
2051 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2052 caps->maxtc);
2053 if (caps->rdma) {
2054 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2055 caps->rdma = 0;
2056 }
2057
2058
2059
2060
2061 if (caps == &hw->dev_caps.common_cap)
2062 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2063 }
2064}
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074static void
2075ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2076 struct ice_aqc_list_caps_elem *cap)
2077{
2078 u32 logical_id = le32_to_cpu(cap->logical_id);
2079 u32 number = le32_to_cpu(cap->number);
2080
2081 func_p->num_allocd_vfs = number;
2082 func_p->vf_base_id = logical_id;
2083 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2084 func_p->num_allocd_vfs);
2085 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2086 func_p->vf_base_id);
2087}
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097static void
2098ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2099 struct ice_aqc_list_caps_elem *cap)
2100{
2101 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2102 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2103 le32_to_cpu(cap->number));
2104 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2105 func_p->guar_num_vsi);
2106}
2107
2108
2109
2110
2111
2112
2113
2114
2115
/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Decodes the IEEE 1588 (PTP) bit-fields of the capability word into the
 * function's timestamping info structure.
 */
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			 struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_func_info *info = &func_p->ts_func_info;
	u32 number = le32_to_cpu(cap->number);

	/* feature enable bit doubles as the common ieee_1588 capability */
	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
	func_p->common_cap.ieee_1588 = info->ena;

	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);

	/* clk_freq is a multi-bit field; clk_src collapses to a boolean */
	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}
2149
2150
2151
2152
2153
2154
2155
2156
2157static void
2158ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2159{
2160 u32 reg_val, val;
2161
2162 reg_val = rd32(hw, GLQF_FD_SIZE);
2163 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2164 GLQF_FD_SIZE_FD_GSIZE_S;
2165 func_p->fd_fltr_guar =
2166 ice_get_num_per_func(hw, val);
2167 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2168 GLQF_FD_SIZE_FD_BSIZE_S;
2169 func_p->fd_fltr_best_effort = val;
2170
2171 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2172 func_p->fd_fltr_guar);
2173 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2174 func_p->fd_fltr_best_effort);
2175}
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191static void
2192ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2193 void *buf, u32 cap_count)
2194{
2195 struct ice_aqc_list_caps_elem *cap_resp;
2196 u32 i;
2197
2198 cap_resp = buf;
2199
2200 memset(func_p, 0, sizeof(*func_p));
2201
2202 for (i = 0; i < cap_count; i++) {
2203 u16 cap = le16_to_cpu(cap_resp[i].cap);
2204 bool found;
2205
2206 found = ice_parse_common_caps(hw, &func_p->common_cap,
2207 &cap_resp[i], "func caps");
2208
2209 switch (cap) {
2210 case ICE_AQC_CAPS_VF:
2211 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2212 break;
2213 case ICE_AQC_CAPS_VSI:
2214 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2215 break;
2216 case ICE_AQC_CAPS_1588:
2217 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2218 break;
2219 case ICE_AQC_CAPS_FD:
2220 ice_parse_fdir_func_caps(hw, func_p);
2221 break;
2222 default:
2223
2224 if (!found)
2225 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2226 i, cap);
2227 break;
2228 }
2229 }
2230
2231 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2232}
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242static void
2243ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2244 struct ice_aqc_list_caps_elem *cap)
2245{
2246 u32 number = le32_to_cpu(cap->number);
2247
2248 dev_p->num_funcs = hweight32(number);
2249 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2250 dev_p->num_funcs);
2251}
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261static void
2262ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2263 struct ice_aqc_list_caps_elem *cap)
2264{
2265 u32 number = le32_to_cpu(cap->number);
2266
2267 dev_p->num_vfs_exposed = number;
2268 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2269 dev_p->num_vfs_exposed);
2270}
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280static void
2281ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2282 struct ice_aqc_list_caps_elem *cap)
2283{
2284 u32 number = le32_to_cpu(cap->number);
2285
2286 dev_p->num_vsi_allocd_to_host = number;
2287 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2288 dev_p->num_vsi_allocd_to_host);
2289}
2290
2291
2292
2293
2294
2295
2296
2297
2298
/**
 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Decodes the device-wide IEEE 1588 (PTP) capability word: ownership and
 * enable bits for the two hardware timers, plus the enabled-ports and
 * timer-ownership maps carried in logical_id/phys_id.
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	/* device-level enable bit doubles as the common ieee_1588 cap */
	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	/* tmr0 owner field occupies the low bits, so no shift is needed */
	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350static void
2351ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2352 struct ice_aqc_list_caps_elem *cap)
2353{
2354 u32 number = le32_to_cpu(cap->number);
2355
2356 dev_p->num_flow_director_fltr = number;
2357 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2358 dev_p->num_flow_director_fltr);
2359}
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375static void
2376ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2377 void *buf, u32 cap_count)
2378{
2379 struct ice_aqc_list_caps_elem *cap_resp;
2380 u32 i;
2381
2382 cap_resp = buf;
2383
2384 memset(dev_p, 0, sizeof(*dev_p));
2385
2386 for (i = 0; i < cap_count; i++) {
2387 u16 cap = le16_to_cpu(cap_resp[i].cap);
2388 bool found;
2389
2390 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2391 &cap_resp[i], "dev caps");
2392
2393 switch (cap) {
2394 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2395 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2396 break;
2397 case ICE_AQC_CAPS_VF:
2398 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2399 break;
2400 case ICE_AQC_CAPS_VSI:
2401 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2402 break;
2403 case ICE_AQC_CAPS_1588:
2404 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2405 break;
2406 case ICE_AQC_CAPS_FD:
2407 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2408 break;
2409 default:
2410
2411 if (!found)
2412 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2413 i, cap);
2414 break;
2415 }
2416 }
2417
2418 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2419}
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440enum ice_status
2441ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2442 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2443{
2444 struct ice_aqc_list_caps *cmd;
2445 struct ice_aq_desc desc;
2446 enum ice_status status;
2447
2448 cmd = &desc.params.get_cap;
2449
2450 if (opc != ice_aqc_opc_list_func_caps &&
2451 opc != ice_aqc_opc_list_dev_caps)
2452 return ICE_ERR_PARAM;
2453
2454 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2455 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2456
2457 if (cap_count)
2458 *cap_count = le32_to_cpu(cmd->count);
2459
2460 return status;
2461}
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471enum ice_status
2472ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2473{
2474 enum ice_status status;
2475 u32 cap_count = 0;
2476 void *cbuf;
2477
2478 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2479 if (!cbuf)
2480 return ICE_ERR_NO_MEMORY;
2481
2482
2483
2484
2485
2486 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2487
2488 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2489 ice_aqc_opc_list_dev_caps, NULL);
2490 if (!status)
2491 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2492 kfree(cbuf);
2493
2494 return status;
2495}
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505static enum ice_status
2506ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2507{
2508 enum ice_status status;
2509 u32 cap_count = 0;
2510 void *cbuf;
2511
2512 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2513 if (!cbuf)
2514 return ICE_ERR_NO_MEMORY;
2515
2516
2517
2518
2519
2520 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2521
2522 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2523 ice_aqc_opc_list_func_caps, NULL);
2524 if (!status)
2525 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2526 kfree(cbuf);
2527
2528 return status;
2529}
2530
2531
2532
2533
2534
/**
 * ice_set_safe_mode_caps - Override device capabilities for Safe Mode
 * @hw: pointer to the HW struct
 *
 * Zeroes both the function and device capability structures and restores
 * only the subset of fields that remain valid in Safe Mode, then pins the
 * queue/vector/VSI counts to the Safe Mode minimums (1 Tx/Rx queue, 2 MSI-X
 * vectors, 1 VSI per function).
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* snapshot function caps before wiping the structure */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore the fields that stay meaningful in Safe Mode */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* snapshot device caps before wiping the structure */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore the fields that stay meaningful in Safe Mode */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}
2599
2600
2601
2602
2603
2604enum ice_status ice_get_caps(struct ice_hw *hw)
2605{
2606 enum ice_status status;
2607
2608 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2609 if (status)
2610 return status;
2611
2612 return ice_discover_func_caps(hw, &hw->func_caps);
2613}
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624enum ice_status
2625ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2626 struct ice_sq_cd *cd)
2627{
2628 struct ice_aqc_manage_mac_write *cmd;
2629 struct ice_aq_desc desc;
2630
2631 cmd = &desc.params.mac_write;
2632 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2633
2634 cmd->flags = flags;
2635 ether_addr_copy(cmd->mac_addr, mac_addr);
2636
2637 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2638}
2639
2640
2641
2642
2643
2644
2645
2646static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2647{
2648 struct ice_aq_desc desc;
2649
2650 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2651 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2652
2653 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2654}
2655
2656
2657
2658
2659
2660
2661
2662
2663void ice_clear_pxe_mode(struct ice_hw *hw)
2664{
2665 if (ice_check_sq_alive(hw, &hw->adminq))
2666 ice_aq_clear_pxe_mode(hw);
2667}
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * Convert a single entry of the [phy_type_low, phy_type_high] pair to its
 * corresponding link speed. Exactly one bit should be set across the two
 * words; if no bit is set, or if bits are set in both words (making the
 * mapping ambiguous), ICE_AQ_LINK_SPEED_UNKNOWN is returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	/* Map the low 64-bit PHY-type word to a speed */
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* Map the high 64-bit PHY-type word to a speed (100G variants only) */
	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* Exactly one of the two words must have resolved to a speed;
	 * both-unknown and both-known are reported as unknown.
	 */
	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817void
2818ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2819 u16 link_speeds_bitmap)
2820{
2821 u64 pt_high;
2822 u64 pt_low;
2823 int index;
2824 u16 speed;
2825
2826
2827 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2828 pt_low = BIT_ULL(index);
2829 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2830
2831 if (link_speeds_bitmap & speed)
2832 *phy_type_low |= BIT_ULL(index);
2833 }
2834
2835
2836 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2837 pt_high = BIT_ULL(index);
2838 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2839
2840 if (link_speeds_bitmap & speed)
2841 *phy_type_high |= BIT_ULL(index);
2842 }
2843}
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
/**
 * ice_aq_set_phy_cfg - set PHY configuration (0x0601)
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Sends the Set PHY Config admin queue command for @pi's logical port.
 * On success the requested configuration is cached in
 * pi->phy.curr_user_phy_cfg. An EMODE response from firmware is
 * deliberately treated as success here.
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	/* command carries a data buffer (the cfg struct) to firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	/* an EMODE return code from firmware is not treated as an error */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	/* remember the user's last request so it can be restored later */
	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}
2901
2902
2903
2904
2905
2906enum ice_status ice_update_link_info(struct ice_port_info *pi)
2907{
2908 struct ice_link_status *li;
2909 enum ice_status status;
2910
2911 if (!pi)
2912 return ICE_ERR_PARAM;
2913
2914 li = &pi->phy.link_info;
2915
2916 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2917 if (status)
2918 return status;
2919
2920 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2921 struct ice_aqc_get_phy_caps_data *pcaps;
2922 struct ice_hw *hw;
2923
2924 hw = pi->hw;
2925 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2926 GFP_KERNEL);
2927 if (!pcaps)
2928 return ICE_ERR_NO_MEMORY;
2929
2930 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2931 pcaps, NULL);
2932
2933 devm_kfree(ice_hw_to_dev(hw), pcaps);
2934 }
2935
2936 return status;
2937}
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947static void
2948ice_cache_phy_user_req(struct ice_port_info *pi,
2949 struct ice_phy_cache_mode_data cache_data,
2950 enum ice_phy_cache_mode cache_mode)
2951{
2952 if (!pi)
2953 return;
2954
2955 switch (cache_mode) {
2956 case ICE_FC_MODE:
2957 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2958 break;
2959 case ICE_SPEED_MODE:
2960 pi->phy.curr_user_speed_req =
2961 cache_data.data.curr_user_speed_req;
2962 break;
2963 case ICE_FEC_MODE:
2964 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2965 break;
2966 default:
2967 break;
2968 }
2969}
2970
2971
2972
2973
2974
2975
2976
2977enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2978{
2979 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2980 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2981 return ICE_FC_FULL;
2982
2983 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2984 return ICE_FC_TX_PAUSE;
2985
2986 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2987 return ICE_FC_RX_PAUSE;
2988
2989 return ICE_FC_NONE;
2990}
2991
2992
2993
2994
2995
2996
2997
2998
2999enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3000{
3001 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3002 return ICE_FEC_AUTO;
3003
3004 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3005 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3006 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3007 ICE_AQC_PHY_FEC_25G_KR_REQ))
3008 return ICE_FEC_BASER;
3009
3010 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3011 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3012 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3013 return ICE_FEC_RS;
3014
3015 return ICE_FEC_NONE;
3016}
3017
3018
3019
3020
3021
3022
3023
3024enum ice_status
3025ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3026 enum ice_fc_mode req_mode)
3027{
3028 struct ice_phy_cache_mode_data cache_data;
3029 u8 pause_mask = 0x0;
3030
3031 if (!pi || !cfg)
3032 return ICE_ERR_BAD_PTR;
3033
3034 switch (req_mode) {
3035 case ICE_FC_FULL:
3036 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3037 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3038 break;
3039 case ICE_FC_RX_PAUSE:
3040 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3041 break;
3042 case ICE_FC_TX_PAUSE:
3043 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3044 break;
3045 default:
3046 break;
3047 }
3048
3049
3050 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3051 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3052
3053
3054 cfg->caps |= pause_mask;
3055
3056
3057 cache_data.data.curr_user_fc_req = req_mode;
3058 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3059
3060 return 0;
3061}
3062
3063
3064
3065
3066
3067
3068
3069
3070
/**
 * ice_set_fc - set the requested flow control mode
 * @pi: port information structure
 * @aq_failures: output; set to an ICE_SET_FC_AQ_FAIL_* code identifying
 *               which admin queue step failed (get caps / set cfg / update)
 * @ena_auto_link_update: also set ICE_AQ_PHY_ENA_AUTO_LINK_UPDT in caps
 *
 * Applies pi->fc.req_mode to the active PHY configuration. If the config
 * actually changes, the new config is written and the link info is polled
 * until it refreshes successfully (bounded retries).
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* fold the requested flow-control mode into the config */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* Only issue the Set PHY Config command if something changed */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* The link can take a while to come back after the PHY
		 * reconfiguration; retry the link-info refresh a bounded
		 * number of times (100 ms apart) before reporting failure.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149bool
3150ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3151 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3152{
3153 u8 caps_mask, cfg_mask;
3154
3155 if (!phy_caps || !phy_cfg)
3156 return false;
3157
3158
3159
3160
3161 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3162 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3163 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3164
3165 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3166 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3167 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3168 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3169 phy_caps->eee_cap != phy_cfg->eee_cap ||
3170 phy_caps->eeer_value != phy_cfg->eeer_value ||
3171 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3172 return false;
3173
3174 return true;
3175}
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186void
3187ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3188 struct ice_aqc_get_phy_caps_data *caps,
3189 struct ice_aqc_set_phy_cfg_data *cfg)
3190{
3191 if (!pi || !caps || !cfg)
3192 return;
3193
3194 memset(cfg, 0, sizeof(*cfg));
3195 cfg->phy_type_low = caps->phy_type_low;
3196 cfg->phy_type_high = caps->phy_type_high;
3197 cfg->caps = caps->caps;
3198 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3199 cfg->eee_cap = caps->eee_cap;
3200 cfg->eeer_value = caps->eeer_value;
3201 cfg->link_fec_opt = caps->link_fec_options;
3202 cfg->module_compliance_enforcement =
3203 caps->module_compliance_enforcement;
3204}
3205
3206
3207
3208
3209
3210
3211
3212enum ice_status
3213ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3214 enum ice_fec_mode fec)
3215{
3216 struct ice_aqc_get_phy_caps_data *pcaps;
3217 enum ice_status status;
3218 struct ice_hw *hw;
3219
3220 if (!pi || !cfg)
3221 return ICE_ERR_BAD_PTR;
3222
3223 hw = pi->hw;
3224
3225 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3226 if (!pcaps)
3227 return ICE_ERR_NO_MEMORY;
3228
3229 status = ice_aq_get_phy_caps(pi, false,
3230 (ice_fw_supports_report_dflt_cfg(hw) ?
3231 ICE_AQC_REPORT_DFLT_CFG :
3232 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3233 if (status)
3234 goto out;
3235
3236 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3237 cfg->link_fec_opt = pcaps->link_fec_options;
3238
3239 switch (fec) {
3240 case ICE_FEC_BASER:
3241
3242
3243
3244 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3245 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3246 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3247 ICE_AQC_PHY_FEC_25G_KR_REQ;
3248 break;
3249 case ICE_FEC_RS:
3250
3251
3252
3253 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3254 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3255 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3256 break;
3257 case ICE_FEC_NONE:
3258
3259 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3260 break;
3261 case ICE_FEC_AUTO:
3262
3263 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3264 cfg->link_fec_opt |= pcaps->link_fec_options;
3265 break;
3266 default:
3267 status = ICE_ERR_PARAM;
3268 break;
3269 }
3270
3271 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3272 !ice_fw_supports_report_dflt_cfg(hw)) {
3273 struct ice_link_default_override_tlv tlv;
3274
3275 if (ice_get_link_default_override(&tlv, pi))
3276 goto out;
3277
3278 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3279 (tlv.options & ICE_LINK_OVERRIDE_EN))
3280 cfg->link_fec_opt = tlv.fec_options;
3281 }
3282
3283out:
3284 kfree(pcaps);
3285
3286 return status;
3287}
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3299{
3300 struct ice_phy_info *phy_info;
3301 enum ice_status status = 0;
3302
3303 if (!pi || !link_up)
3304 return ICE_ERR_PARAM;
3305
3306 phy_info = &pi->phy;
3307
3308 if (phy_info->get_link_info) {
3309 status = ice_update_link_info(pi);
3310
3311 if (status)
3312 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3313 status);
3314 }
3315
3316 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3317
3318 return status;
3319}
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329enum ice_status
3330ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3331 struct ice_sq_cd *cd)
3332{
3333 struct ice_aqc_restart_an *cmd;
3334 struct ice_aq_desc desc;
3335
3336 cmd = &desc.params.restart_an;
3337
3338 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3339
3340 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3341 cmd->lport_num = pi->lport;
3342 if (ena_link)
3343 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3344 else
3345 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3346
3347 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3348}
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359enum ice_status
3360ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3361 struct ice_sq_cd *cd)
3362{
3363 struct ice_aqc_set_event_mask *cmd;
3364 struct ice_aq_desc desc;
3365
3366 cmd = &desc.params.set_event_mask;
3367
3368 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3369
3370 cmd->lport_num = port_num;
3371
3372 cmd->event_mask = cpu_to_le16(mask);
3373 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3374}
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384enum ice_status
3385ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3386{
3387 struct ice_aqc_set_mac_lb *cmd;
3388 struct ice_aq_desc desc;
3389
3390 cmd = &desc.params.set_mac_lb;
3391
3392 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3393 if (ena_lpbk)
3394 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3395
3396 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3397}
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407enum ice_status
3408ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3409 struct ice_sq_cd *cd)
3410{
3411 struct ice_aqc_set_port_id_led *cmd;
3412 struct ice_hw *hw = pi->hw;
3413 struct ice_aq_desc desc;
3414
3415 cmd = &desc.params.set_port_id_led;
3416
3417 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3418
3419 if (is_orig_mode)
3420 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3421 else
3422 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3423
3424 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3425}
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442enum ice_status
3443ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3444 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3445 bool write, struct ice_sq_cd *cd)
3446{
3447 struct ice_aqc_sff_eeprom *cmd;
3448 struct ice_aq_desc desc;
3449 enum ice_status status;
3450
3451 if (!data || (mem_addr & 0xff00))
3452 return ICE_ERR_PARAM;
3453
3454 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3455 cmd = &desc.params.read_write_sff_param;
3456 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3457 cmd->lport_num = (u8)(lport & 0xff);
3458 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3459 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3460 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3461 ((set_page <<
3462 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3463 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3464 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3465 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3466 if (write)
3467 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3468
3469 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3470 return status;
3471}
3472
3473
3474
3475
3476
3477
3478
3479
3480
/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters (VSI handle, LUT type/size, global LUT id, buf)
 * @set: set true to set the table, false to get the table
 *
 * Internal helper to get (0x0B05) or set (0x0B03) the RSS lookup table.
 * The LUT-size flag is only encoded for PF-type LUTs and for global LUTs
 * on the set path; all other combinations send with the size flag left at
 * its 128-entry default.
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	/* translate the software VSI handle to the hardware VSI number */
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		/* the set command carries the LUT buffer to firmware */
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	/* encode the LUT type; reject anything unrecognized */
	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		/* VSI-type LUTs never carry a size flag */
		goto ice_aq_get_set_rss_lut_send;
	}

	/* encode the LUT size flag; 2K entries are only valid for PF LUTs */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		fallthrough;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}
3572
3573
3574
3575
3576
3577
3578
3579
/**
 * ice_aq_get_rss_lut - get the RSS lookup table
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * Thin wrapper around __ice_aq_get_set_rss_lut() for the "get" direction.
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}
3585
3586
3587
3588
3589
3590
3591
3592
/**
 * ice_aq_set_rss_lut - set the RSS lookup table
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * Thin wrapper around __ice_aq_get_set_rss_lut() for the "set" direction.
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608static enum
3609ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3610 struct ice_aqc_get_set_rss_keys *key,
3611 bool set)
3612{
3613 struct ice_aqc_get_set_rss_key *cmd_resp;
3614 u16 key_size = sizeof(*key);
3615 struct ice_aq_desc desc;
3616
3617 cmd_resp = &desc.params.get_set_rss_key;
3618
3619 if (set) {
3620 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3621 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3622 } else {
3623 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3624 }
3625
3626 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3627 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3628 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3629 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3630
3631 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3632}
3633
3634
3635
3636
3637
3638
3639
3640
3641
/**
 * ice_aq_get_rss_key - get the RSS key for a VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to the key structure to be filled in
 *
 * Validates the handle, translates it to the hardware VSI number, and
 * issues the get-key admin queue command.
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}
3652
3653
3654
3655
3656
3657
3658
3659
3660
/**
 * ice_aq_set_rss_key - set the RSS key for a VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to the key structure to be programmed
 *
 * Validates the handle, translates it to the hardware VSI number, and
 * issues the set-key admin queue command.
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693static enum ice_status
3694ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3695 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3696 struct ice_sq_cd *cd)
3697{
3698 struct ice_aqc_add_tx_qgrp *list;
3699 struct ice_aqc_add_txqs *cmd;
3700 struct ice_aq_desc desc;
3701 u16 i, sum_size = 0;
3702
3703 cmd = &desc.params.add_txqs;
3704
3705 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3706
3707 if (!qg_list)
3708 return ICE_ERR_PARAM;
3709
3710 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3711 return ICE_ERR_PARAM;
3712
3713 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3714 sum_size += struct_size(list, txqs, list->num_txqs);
3715 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3716 list->num_txqs);
3717 }
3718
3719 if (buf_size != sum_size)
3720 return ICE_ERR_PARAM;
3721
3722 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3723
3724 cmd->num_qgrps = num_qgrps;
3725
3726 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3727}
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
/**
 * ice_aq_dis_lan_txq - disable LAN Tx queue groups (0x0C31)
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of queue groups in @qg_list
 * @qg_list: list of variable-length queue groups to be disabled
 * @buf_size: size of @qg_list in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queues. When @rst_src is set, @qg_list may be NULL and
 * the command disables queues on behalf of the resetting VM/VF instead.
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	/* fixed 5-unit firmware timeout for the disable operation */
	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* VF numbers are relative; bias by this function's VF base */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;

	if (!qg_list)
		goto do_aq;

	/* the command carries a data buffer to firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	/* validate buf_size against the summed, padded item sizes */
	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* an even number of queues needs 2 bytes of padding so the
		 * next item starts on the expected boundary
		 */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836static int
3837ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3838 struct ice_aqc_add_rdma_qset_data *qset_list,
3839 u16 buf_size, struct ice_sq_cd *cd)
3840{
3841 struct ice_aqc_add_rdma_qset_data *list;
3842 struct ice_aqc_add_rdma_qset *cmd;
3843 struct ice_aq_desc desc;
3844 u16 i, sum_size = 0;
3845
3846 cmd = &desc.params.add_rdma_qset;
3847
3848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3849
3850 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3851 return -EINVAL;
3852
3853 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3854 u16 num_qsets = le16_to_cpu(list->num_qsets);
3855
3856 sum_size += struct_size(list, rdma_qsets, num_qsets);
3857 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3858 num_qsets);
3859 }
3860
3861 if (buf_size != sum_size)
3862 return -EINVAL;
3863
3864 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3865
3866 cmd->num_qset_grps = num_qset_grps;
3867
3868 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
3869 buf_size, cd));
3870}
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880static void
3881ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3882{
3883 u8 src_byte, dest_byte, mask;
3884 u8 *from, *dest;
3885 u16 shift_width;
3886
3887
3888 from = src_ctx + ce_info->offset;
3889
3890
3891 shift_width = ce_info->lsb % 8;
3892 mask = (u8)(BIT(ce_info->width) - 1);
3893
3894 src_byte = *from;
3895 src_byte &= mask;
3896
3897
3898 mask <<= shift_width;
3899 src_byte <<= shift_width;
3900
3901
3902 dest = dest_ctx + (ce_info->lsb / 8);
3903
3904 memcpy(&dest_byte, dest, sizeof(dest_byte));
3905
3906 dest_byte &= ~mask;
3907 dest_byte |= src_byte;
3908
3909
3910 memcpy(dest, &dest_byte, sizeof(dest_byte));
3911}
3912
3913
3914
3915
3916
3917
3918
3919static void
3920ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3921{
3922 u16 src_word, mask;
3923 __le16 dest_word;
3924 u8 *from, *dest;
3925 u16 shift_width;
3926
3927
3928 from = src_ctx + ce_info->offset;
3929
3930
3931 shift_width = ce_info->lsb % 8;
3932 mask = BIT(ce_info->width) - 1;
3933
3934
3935
3936
3937 src_word = *(u16 *)from;
3938 src_word &= mask;
3939
3940
3941 mask <<= shift_width;
3942 src_word <<= shift_width;
3943
3944
3945 dest = dest_ctx + (ce_info->lsb / 8);
3946
3947 memcpy(&dest_word, dest, sizeof(dest_word));
3948
3949 dest_word &= ~(cpu_to_le16(mask));
3950 dest_word |= cpu_to_le16(src_word);
3951
3952
3953 memcpy(dest, &dest_word, sizeof(dest_word));
3954}
3955
3956
3957
3958
3959
3960
3961
3962static void
3963ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3964{
3965 u32 src_dword, mask;
3966 __le32 dest_dword;
3967 u8 *from, *dest;
3968 u16 shift_width;
3969
3970
3971 from = src_ctx + ce_info->offset;
3972
3973
3974 shift_width = ce_info->lsb % 8;
3975
3976
3977
3978
3979
3980 if (ce_info->width < 32)
3981 mask = BIT(ce_info->width) - 1;
3982 else
3983 mask = (u32)~0;
3984
3985
3986
3987
3988 src_dword = *(u32 *)from;
3989 src_dword &= mask;
3990
3991
3992 mask <<= shift_width;
3993 src_dword <<= shift_width;
3994
3995
3996 dest = dest_ctx + (ce_info->lsb / 8);
3997
3998 memcpy(&dest_dword, dest, sizeof(dest_dword));
3999
4000 dest_dword &= ~(cpu_to_le32(mask));
4001 dest_dword |= cpu_to_le32(src_dword);
4002
4003
4004 memcpy(dest, &dest_dword, sizeof(dest_dword));
4005}
4006
4007
4008
4009
4010
4011
4012
4013static void
4014ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4015{
4016 u64 src_qword, mask;
4017 __le64 dest_qword;
4018 u8 *from, *dest;
4019 u16 shift_width;
4020
4021
4022 from = src_ctx + ce_info->offset;
4023
4024
4025 shift_width = ce_info->lsb % 8;
4026
4027
4028
4029
4030
4031 if (ce_info->width < 64)
4032 mask = BIT_ULL(ce_info->width) - 1;
4033 else
4034 mask = (u64)~0;
4035
4036
4037
4038
4039 src_qword = *(u64 *)from;
4040 src_qword &= mask;
4041
4042
4043 mask <<= shift_width;
4044 src_qword <<= shift_width;
4045
4046
4047 dest = dest_ctx + (ce_info->lsb / 8);
4048
4049 memcpy(&dest_qword, dest, sizeof(dest_qword));
4050
4051 dest_qword &= ~(cpu_to_le64(mask));
4052 dest_qword |= cpu_to_le64(src_qword);
4053
4054
4055 memcpy(dest, &dest_qword, sizeof(dest_qword));
4056}
4057
4058
4059
4060
4061
4062
4063
4064
4065enum ice_status
4066ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4067 const struct ice_ctx_ele *ce_info)
4068{
4069 int f;
4070
4071 for (f = 0; ce_info[f].width; f++) {
4072
4073
4074
4075
4076 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4077 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4078 f, ce_info[f].width, ce_info[f].size_of);
4079 continue;
4080 }
4081 switch (ce_info[f].size_of) {
4082 case sizeof(u8):
4083 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4084 break;
4085 case sizeof(u16):
4086 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4087 break;
4088 case sizeof(u32):
4089 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4090 break;
4091 case sizeof(u64):
4092 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4093 break;
4094 default:
4095 return ICE_ERR_INVAL_SIZE;
4096 }
4097 }
4098
4099 return 0;
4100}
4101
4102
4103
4104
4105
4106
4107
4108
4109struct ice_q_ctx *
4110ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4111{
4112 struct ice_vsi_ctx *vsi;
4113 struct ice_q_ctx *q_ctx;
4114
4115 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4116 if (!vsi)
4117 return NULL;
4118 if (q_handle >= vsi->num_lan_q_entries[tc])
4119 return NULL;
4120 if (!vsi->lan_q_ctx[tc])
4121 return NULL;
4122 q_ctx = vsi->lan_q_ctx[tc];
4123 return &q_ctx[q_handle];
4124}
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
/**
 * ice_ena_vsi_txq - enable one LAN Tx queue on a VSI/TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number the queue belongs to
 * @q_handle: software queue handle (index into the VSI's LAN queue contexts)
 * @num_qgrps: number of queue groups in @buf (only 1 is supported)
 * @buf: queue group to add; parent TEID is filled in by this function
 * @buf_size: size of @buf for the indirect admin queue command
 * @cd: pointer to command details structure or NULL
 *
 * Picks a free parent node in the Tx scheduler tree, issues the
 * add-Tx-LAN-queue admin command, then mirrors the new leaf into the
 * software scheduler tree and replays the queue's BW settings.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* only a single queue (in a single group) may be added per call */
	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* scheduler tree and queue contexts are protected by sched_lock */
	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node for the new queue leaf */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	/* Mark the generic, CIR and EIR sections of the element as valid and
	 * program the default rate-limiter profile and BW weight for both
	 * CIR and EIR; the "generic" section itself is left zeroed.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue via admin queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	/* record the TEID firmware assigned to the new queue */
	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into the scheduler tree's queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
/**
 * ice_dis_vsi_txq - disable LAN Tx queues and remove their scheduler nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues to disable
 * @q_handles: array of software queue handles
 * @q_ids: array of queue IDs
 * @q_teids: array of queue node TEIDs
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disables each listed queue via the admin queue and frees the matching
 * node from the software scheduler tree.
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	/* remains DOES_NOT_EXIST if no queue in the list is found */
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if there are no queues but a reset source is given, the
		 * disable command must still be sent (with no queue list)
		 * to complete the reset; otherwise there is nothing to do
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	/* buffer holds exactly one queue ID; queues are disabled one per
	 * admin queue call below
	 */
	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		/* queues with no scheduler node are silently skipped */
		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
				  q_handles[i]);
			continue;
		}
		/* stored handle must match the one being released */
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		/* only free the node and invalidate the context once the
		 * firmware has acknowledged the disable
		 */
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322static enum ice_status
4323ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4324 u16 *maxqs, u8 owner)
4325{
4326 enum ice_status status = 0;
4327 u8 i;
4328
4329 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4330 return ICE_ERR_CFG;
4331
4332 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4333 return ICE_ERR_PARAM;
4334
4335 mutex_lock(&pi->sched_lock);
4336
4337 ice_for_each_traffic_class(i) {
4338
4339 if (!ice_sched_get_tc_node(pi, i))
4340 continue;
4341
4342 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4343 ice_is_tc_ena(tc_bitmap, i));
4344 if (status)
4345 break;
4346 }
4347
4348 mutex_unlock(&pi->sched_lock);
4349 return status;
4350}
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361enum ice_status
4362ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4363 u16 *max_lanqs)
4364{
4365 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4366 ICE_SCHED_NODE_OWNER_LAN);
4367}
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378int
4379ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4380 u16 *max_rdmaqs)
4381{
4382 return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
4383 max_rdmaqs,
4384 ICE_SCHED_NODE_OWNER_RDMA));
4385}
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
/**
 * ice_ena_vsi_rdma_qset - enable RDMA Qsets on a VSI/TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: array of RDMA Qset IDs to add
 * @num_qsets: number of entries in @rdma_qset
 * @qset_teid: output array receiving one node TEID per added Qset
 *
 * Reserves a free RDMA parent node in the Tx scheduler, sends the
 * add-RDMA-Qset admin command for all Qsets at once, then mirrors each
 * returned leaf into the software scheduler tree.
 *
 * Returns 0 on success or a negative errno.
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* flexible-array command buffer sized for all requested Qsets */
	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	/* find a parent node for the new Qset leaves */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		/* mark the generic, CIR and EIR sections valid and program
		 * the default rate-limiter profile and BW weight per Qset
		 */
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	/* mirror each firmware-assigned leaf into the SW scheduler tree */
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					    &node);
		if (status) {
			ret = ice_status_to_errno(status);
			break;
		}
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}
4469
4470
4471
4472
4473
4474
4475
4476
4477int
4478ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4479 u16 *q_id)
4480{
4481 struct ice_aqc_dis_txq_item *qg_list;
4482 enum ice_status status = 0;
4483 struct ice_hw *hw;
4484 u16 qg_size;
4485 int i;
4486
4487 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4488 return -EIO;
4489
4490 hw = pi->hw;
4491
4492 qg_size = struct_size(qg_list, q_id, 1);
4493 qg_list = kzalloc(qg_size, GFP_KERNEL);
4494 if (!qg_list)
4495 return -ENOMEM;
4496
4497 mutex_lock(&pi->sched_lock);
4498
4499 for (i = 0; i < count; i++) {
4500 struct ice_sched_node *node;
4501
4502 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4503 if (!node)
4504 continue;
4505
4506 qg_list->parent_teid = node->info.parent_teid;
4507 qg_list->num_qs = 1;
4508 qg_list->q_id[0] =
4509 cpu_to_le16(q_id[i] |
4510 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4511
4512 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4513 ICE_NO_RESET, 0, NULL);
4514 if (status)
4515 break;
4516
4517 ice_free_sched_node(pi, node);
4518 }
4519
4520 mutex_unlock(&pi->sched_lock);
4521 kfree(qg_list);
4522 return ice_status_to_errno(status);
4523}
4524
4525
4526
4527
4528
4529
4530
4531static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4532{
4533 struct ice_switch_info *sw = hw->switch_info;
4534 u8 i;
4535
4536
4537 ice_rm_all_sw_replay_rule_info(hw);
4538
4539
4540
4541
4542 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4543 list_replace_init(&sw->recp_list[i].filt_rules,
4544 &sw->recp_list[i].filt_replay_rules);
4545 ice_sched_replay_agg_vsi_preinit(hw);
4546
4547 return 0;
4548}
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4559{
4560 enum ice_status status;
4561
4562 if (!ice_is_vsi_valid(hw, vsi_handle))
4563 return ICE_ERR_PARAM;
4564
4565
4566 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4567 status = ice_replay_pre_init(hw);
4568 if (status)
4569 return status;
4570 }
4571
4572 status = ice_replay_rss_cfg(hw, vsi_handle);
4573 if (status)
4574 return status;
4575
4576 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4577 if (!status)
4578 status = ice_replay_vsi_agg(hw, vsi_handle);
4579 return status;
4580}
4581
4582
4583
4584
4585
4586
4587
/**
 * ice_replay_post - post-replay cleanup
 * @hw: pointer to the HW struct
 *
 * Called once after replay has completed on all VSIs: removes the
 * now-consumed replay rule info and replays aggregator settings.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* delete old entries from the replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603void
4604ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4605 u64 *prev_stat, u64 *cur_stat)
4606{
4607 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4608
4609
4610
4611
4612
4613
4614 if (!prev_stat_loaded) {
4615 *prev_stat = new_data;
4616 return;
4617 }
4618
4619
4620
4621
4622 if (new_data >= *prev_stat)
4623 *cur_stat += new_data - *prev_stat;
4624 else
4625
4626 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4627
4628
4629 *prev_stat = new_data;
4630}
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640void
4641ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4642 u64 *prev_stat, u64 *cur_stat)
4643{
4644 u32 new_data;
4645
4646 new_data = rd32(hw, reg);
4647
4648
4649
4650
4651
4652
4653 if (!prev_stat_loaded) {
4654 *prev_stat = new_data;
4655 return;
4656 }
4657
4658
4659
4660
4661 if (new_data >= *prev_stat)
4662 *cur_stat += new_data - *prev_stat;
4663 else
4664
4665 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4666
4667
4668 *prev_stat = new_data;
4669}
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679enum ice_status
4680ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4681 struct ice_aqc_txsched_elem_data *buf)
4682{
4683 u16 buf_size, num_elem_ret = 0;
4684 enum ice_status status;
4685
4686 buf_size = sizeof(*buf);
4687 memset(buf, 0, buf_size);
4688 buf->node_teid = cpu_to_le32(node_teid);
4689 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4690 NULL);
4691 if (status || num_elem_ret != 1)
4692 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4693 return status;
4694}
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711int
4712ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4713 u32 value, struct ice_sq_cd *cd)
4714{
4715 struct ice_aqc_driver_shared_params *cmd;
4716 struct ice_aq_desc desc;
4717
4718 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4719 return -EIO;
4720
4721 cmd = &desc.params.drv_shared_params;
4722
4723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4724
4725 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
4726 cmd->param_indx = idx;
4727 cmd->param_val = cpu_to_le32(value);
4728
4729 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
4730}
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744int
4745ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4746 u32 *value, struct ice_sq_cd *cd)
4747{
4748 struct ice_aqc_driver_shared_params *cmd;
4749 struct ice_aq_desc desc;
4750 enum ice_status status;
4751
4752 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4753 return -EIO;
4754
4755 cmd = &desc.params.drv_shared_params;
4756
4757 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4758
4759 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
4760 cmd->param_indx = idx;
4761
4762 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4763 if (status)
4764 return ice_status_to_errno(status);
4765
4766 *value = le32_to_cpu(cmd->param_val);
4767
4768 return 0;
4769}
4770
4771
4772
4773
4774
4775
4776
4777bool ice_fw_supports_link_override(struct ice_hw *hw)
4778{
4779 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4780 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4781 return true;
4782 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4783 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4784 return true;
4785 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4786 return true;
4787 }
4788
4789 return false;
4790}
4791
4792
4793
4794
4795
4796
4797
4798
/**
 * ice_get_link_default_override - read the port's link default override TLV
 * @ldo: link default override struct to fill out
 * @pi: port information structure
 *
 * Locates the link-default-override TLV in the NVM PFA area and reads,
 * word by word: the option/PHY-config word, the FEC options word, and the
 * 64-bit PHY type masks (low then high) for @pi's logical port.
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* each logical port has its own block inside the TLV */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* first word: link options and PHY config bits */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* next word: FEC options */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* assemble the low 64-bit PHY type mask from 16-bit NVM words */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* then the high 64-bit PHY type mask, immediately following */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
4864
4865
4866
4867
4868
4869bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4870{
4871 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4872 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4873 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4874 ICE_AQC_PHY_AN_EN_CLAUSE37))
4875 return true;
4876
4877 return false;
4878}
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890enum ice_status
4891ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4892 struct ice_sq_cd *cd)
4893{
4894 struct ice_aqc_lldp_set_local_mib *cmd;
4895 struct ice_aq_desc desc;
4896
4897 cmd = &desc.params.lldp_set_mib;
4898
4899 if (buf_size == 0 || !buf)
4900 return ICE_ERR_PARAM;
4901
4902 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4903
4904 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4905 desc.datalen = cpu_to_le16(buf_size);
4906
4907 cmd->type = mib_type;
4908 cmd->length = cpu_to_le16(buf_size);
4909
4910 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4911}
4912
4913
4914
4915
4916
4917bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
4918{
4919 if (hw->mac_type != ICE_MAC_E810)
4920 return false;
4921
4922 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
4923 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
4924 return true;
4925 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
4926 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
4927 return true;
4928 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
4929 return true;
4930 }
4931 return false;
4932}
4933
4934
4935
4936
4937
4938
4939
4940enum ice_status
4941ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
4942{
4943 struct ice_aqc_lldp_filter_ctrl *cmd;
4944 struct ice_aq_desc desc;
4945
4946 cmd = &desc.params.lldp_filter_ctrl;
4947
4948 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
4949
4950 if (add)
4951 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
4952 else
4953 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
4954
4955 cmd->vsi_num = cpu_to_le16(vsi_num);
4956
4957 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4958}
4959
4960
4961
4962
4963
4964
4965
4966bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
4967{
4968 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4969 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
4970 return true;
4971 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
4972 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
4973 return true;
4974 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4975 return true;
4976 }
4977 return false;
4978}
4979