// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * Sets the MAC type of the adapter based on the vendor ID and device ID
 * stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810 - determine if a device is E810 based
 * @hw: pointer to the HW struct
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - determine if a device is an E810T based NIC
 * @hw: pointer to the HW struct
 *
 * Returns true when the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
		    hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
			return true;
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules,
 * port configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * Reads the adapter's MAC address(es) (0x0107) and saves the LAN station
 * address into the port_info structure.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
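
/* Usage sketch (hypothetical caller, not part of this file): query the
 * PHY's currently active configuration. Assumes a port_info initialized
 * by ice_init_hw() and a report mode supported by the firmware.
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = {};
 *	int err;
 *
 *	err = ice_aq_get_phy_caps(hw->port_info, false,
 *				  ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL);
 *	if (!err)
 *		... inspect pcaps.caps and pcaps.phy_type_low ...
 */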

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0).
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present - check if media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then the
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If the
	 * AQC returns an error (e.g. ENOENT), then no cage is present.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor.
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: maximum frame size to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603).
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		if (recps[i].root_buf)
			devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * Enables or disables reporting of FW log events through the admin queue
 * and/or UART, based on the cq_en and uart_en flags and the per-module
 * event configuration in hw->fw_log. Only modules whose configuration
 * differs from the current firmware state are included in the request.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages, so their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: the AQ command buffer to be copied
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
		GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information; a single port can report up to two
	 * (LAN and WoL) addresses
	 */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing, since ice_init_hw() rolls back its own failures.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks on its
 * completion before proceeding with PF reset.
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config
	 * lock timeout plus the PFR timeout, which accounts for a possible
	 * reset occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note: if anything other than a PF reset is triggered, PXE mode is
 * restored and has to be cleared with ice_clear_pxe_mode() again once
 * the AQ interface becomes available.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
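
/* Usage sketch (hypothetical caller, not part of this file): request a PF
 * reset during initialization and report a failure. ice_init_hw() above
 * does essentially this.
 *
 *	int err = ice_reset(hw, ICE_RESET_PFR);
 *	if (err)
 *		dev_err(ice_hw_to_dev(hw), "PF reset failed: %d\n", err);
 */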

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the packed rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space.
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space, enabling the prefetch setting on the way.
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
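
/* Usage sketch (hypothetical, illustrative values only): program Rx queue 0
 * with a 1024-descriptor ring. Per the ice_rlan_ctx_info widths above, the
 * ring base and buffer sizes are expressed in 128-byte units; 'ring_dma'
 * is an assumed DMA address from the caller.
 *
 *	struct ice_rlan_ctx ctx = {};
 *
 *	ctx.base = ring_dma >> 7;
 *	ctx.qlen = 1024;
 *	ctx.dbuf = 2048 >> 7;
 *	err = ice_write_rxq_ctx(hw, &ctx, 0);
 */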

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in the descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the
		 * message size by the size of the data field
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
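
/* Usage sketch (hypothetical, field values are illustrative assumptions):
 * read a remote PHY register over the sideband queue. 'reg' is an assumed
 * register address supplied by the caller.
 *
 *	struct ice_sbq_msg_input msg = {
 *		.dest_dev = rmn_0,
 *		.opcode = ice_sbq_msg_rd,
 *		.msg_addr_low = lower_16_bits(reg),
 *		.msg_addr_high = upper_16_bits(reg),
 *	};
 *
 *	err = ice_sbq_rw_reg(hw, &msg);
 *	if (!err)
 *		val = msg.data;
 */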

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return -ENOMEM;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	int status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List, Upload Section,
	 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
	 * Add Recipe, Set Recipes to Profile Association, Get Recipe, Get
	 * Recipes to Profile Association, and Release Resource (with resource
	 * ID set to Global Config Lock) AdminQ commands are allowed; all
	 * others must block until the package download completes and the
	 * Global Config Lock is released. See ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
	case ice_aqc_opc_upload_section:
	case ice_aqc_opc_update_pkg:
	case ice_aqc_opc_set_port_params:
	case ice_aqc_opc_get_vlan_mode_parameters:
	case ice_aqc_opc_set_vlan_mode_parameters:
	case ice_aqc_opc_add_recipe:
	case ice_aqc_opc_recipe_to_profile:
	case ice_aqc_opc_get_recipe:
	case ice_aqc_opc_get_recipe_to_profile:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}
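
/* Usage sketch (hypothetical caller): issue a direct (buffer-less) admin
 * queue command. This is the same pattern ice_clear_pf_cfg() above uses.
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 */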

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands.
 */
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	int status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware.
 */
int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) 0         - acquired lock, and can perform download package
 *  2) -EIO      - did not get lock, driver should fail to load
 *  3) -EALREADY - did not get lock, but another driver has
 *                 successfully downloaded the package; the driver does
 *                 not have to download the package and can continue loading
 */
static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	int status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 *
	 * The Global Config Lock response utilizes an additional status
	 * field: if the lock is held by some other driver, the command
	 * completes with ICE_AQ_RES_GLBL_IN_PROG in the status field and
	 * the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return -EIO;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return -EALREADY;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return -EIO;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the
	 * maximum time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release common resource using the admin queue commands (0x0009).
 */
static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	int status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary
	 * updates; in this case the caller does not obtain the resource
	 * and has no further work to do.
	 */
	if (status == -EALREADY)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == -EALREADY)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != -EALREADY)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == -EALREADY) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
	}
	return status;
}
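
/* Usage sketch (hypothetical, assuming the NVM resource constants used by
 * the NVM helpers elsewhere in this driver): hold the NVM resource around
 * an access, always releasing it on the same path.
 *
 *	err = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *			      ICE_NVM_TIMEOUT);
 *	if (err)
 *		return err;
 *	... access NVM ...
 *	ice_release_res(hw, ICE_NVM_RES_ID);
 */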

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	u32 total_delay = 0;
	int status;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands.
 */
int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return -EINVAL;

	if (buf_size < flex_array_size(buf, elem, num_entries))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Prepare buffer to allocate resource */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}
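
/* Usage sketch (hypothetical; the resource type is illustrative): carve one
 * counter out of the shared resource pool, then hand it back when done.
 *
 *	u16 ctr_id;
 *
 *	err = ice_alloc_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 1,
 *			       false, &ctr_id);
 *	if (!err) {
 *		... use ctr_id ...
 *		ice_free_hw_res(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK, 1,
 *				&ctr_id);
 *	}
 */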

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	u16 buf_len;
	int status;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Prepare buffer to free resource */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap
 * returned from parsing capabilities and use this to calculate the number
 * of resources per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: pcie_reset_avoidance = %d\n", prefix,
			  caps->pcie_reset_avoidance);
		break;
	case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		ice_debug(hw, ICE_DBG_INIT,
			  "%s: reset_restrict_support = %d\n", prefix,
			  caps->reset_restrict_support);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
static void
ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
			  caps->maxtc);
		if (caps->rdma) {
			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
			caps->rdma = 0;
		}

		/* print message only when processing device capabilities
		 * during initialization.
		 */
		if (caps == &hw->dev_caps.common_cap)
			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
	}
}

/**
 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VF.
 */
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 number = le32_to_cpu(cap->number);

	func_p->num_allocd_vfs = number;
	func_p->vf_base_id = logical_id;
	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
		  func_p->num_allocd_vfs);
	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
		  func_p->vf_base_id);
}

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  le32_to_cpu(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			 struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_func_info *info = &func_p->ts_func_info;
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
	func_p->common_cap.ieee_1588 = info->ena;

	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);

	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);

	if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
		info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
	} else {
		/* Unknown clock frequency, so assume a (probably incorrect)
		 * default to avoid out-of-bounds look ups of frequency
		 * related information.
		 */
		ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
			  info->clk_freq);
		info->time_ref = ICE_TIME_REF_FREQ_25_000;
	}

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
		GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar =
		ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
		GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		    void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(func_p, 0, sizeof(*func_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &func_p->common_cap,
					      &cap_resp[i], "func caps");

		switch (cap) {
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_func_caps(hw, func_p);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
}

/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
 */
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_funcs = hweight32(number);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
		  dev_p->num_funcs);
}

/**
 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VF for device capabilities.
 */
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_vfs_exposed = number;
	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
		  dev_p->num_vfs_exposed);
}

/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_vsi_allocd_to_host = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
		  dev_p->num_vsi_allocd_to_host);
}

/**
 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_1588 for device capabilities.
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431static void
2432ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2433 struct ice_aqc_list_caps_elem *cap)
2434{
2435 u32 number = le32_to_cpu(cap->number);
2436
2437 dev_p->num_flow_director_fltr = number;
2438 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2439 dev_p->num_flow_director_fltr);
2440}
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
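/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Walk the buffer of capability elements reported by firmware and fill in
 * the device capabilities structure. Capabilities shared between device and
 * function are handled by ice_parse_common_caps().
 */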
2456static void
2457ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2458 void *buf, u32 cap_count)
2459{
2460 struct ice_aqc_list_caps_elem *cap_resp;
2461 u32 i;
2462
2463 cap_resp = buf;
2464
2465 memset(dev_p, 0, sizeof(*dev_p));
2466
2467 for (i = 0; i < cap_count; i++) {
2468 u16 cap = le16_to_cpu(cap_resp[i].cap);
2469 bool found;
2470
2471 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2472 &cap_resp[i], "dev caps");
2473
2474 switch (cap) {
2475 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2476 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2477 break;
2478 case ICE_AQC_CAPS_VF:
2479 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2480 break;
2481 case ICE_AQC_CAPS_VSI:
2482 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2483 break;
2484 case ICE_AQC_CAPS_1588:
2485 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2486 break;
2487 case ICE_AQC_CAPS_FD:
2488 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2489 break;
2490 default:
			/* Don't list common capabilities as unknown */
2492 if (!found)
2493 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2494 i, cap);
2495 break;
2496 }
2497 }
2498
2499 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2500}
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
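/**
 * ice_aq_list_caps - query function or device capabilities
 * @hw: pointer to the HW struct
 * @buf: a buffer to hold the capabilities
 * @buf_size: size of the buffer
 * @cap_count: if not NULL, set to the number of capabilities firmware reports
 * @opc: capabilities type to discover, device or function
 * @cd: pointer to command details structure or NULL
 *
 * Get the function (ice_aqc_opc_list_func_caps) or device
 * (ice_aqc_opc_list_dev_caps) capabilities. The caller is expected to parse
 * the returned capability records.
 */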
2521int
2522ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2523 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2524{
2525 struct ice_aqc_list_caps *cmd;
2526 struct ice_aq_desc desc;
2527 int status;
2528
2529 cmd = &desc.params.get_cap;
2530
2531 if (opc != ice_aqc_opc_list_func_caps &&
2532 opc != ice_aqc_opc_list_dev_caps)
2533 return -EINVAL;
2534
2535 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2536 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2537
2538 if (cap_count)
2539 *cap_count = le32_to_cpu(cmd->count);
2540
2541 return status;
2542}
2543
2544
2545
2546
2547
2548
2549
2550
2551
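/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */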
2552int
2553ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2554{
2555 u32 cap_count = 0;
2556 void *cbuf;
2557 int status;
2558
2559 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2560 if (!cbuf)
2561 return -ENOMEM;
2562
	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2568
2569 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2570 ice_aqc_opc_list_dev_caps, NULL);
2571 if (!status)
2572 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2573 kfree(cbuf);
2574
2575 return status;
2576}
2577
2578
2579
2580
2581
2582
2583
2584
2585
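/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps
 * structure for later use.
 */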
2586static int
2587ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2588{
2589 u32 cap_count = 0;
2590 void *cbuf;
2591 int status;
2592
2593 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2594 if (!cbuf)
2595 return -ENOMEM;
2596
	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2602
2603 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2604 ice_aqc_opc_list_func_caps, NULL);
2605 if (!status)
2606 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2607 kfree(cbuf);
2608
2609 return status;
2610}
2611
2612
2613
2614
2615
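/**
 * ice_set_safe_mode_caps - Override device capabilities for Safe Mode
 * @hw: pointer to the HW struct
 *
 * Replace the discovered function and device capabilities with the reduced
 * set used in Safe Mode: one Tx/Rx queue pair and one guaranteed VSI per
 * function, two MSI-X vectors, while preserving the queue and vector ID
 * bases and the NVM update capabilities.
 */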
2616void ice_set_safe_mode_caps(struct ice_hw *hw)
2617{
2618 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2619 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2620 struct ice_hw_common_caps cached_caps;
2621 u32 num_funcs;
2622
	/* cache previous func caps */
	cached_caps = func_caps->common_cap;
2625
	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));
2628
2629#define ICE_RESTORE_FUNC_CAP(name) \
2630 func_caps->common_cap.name = cached_caps.name
2631
	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
2634 ICE_RESTORE_FUNC_CAP(txq_first_id);
2635 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2636 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2637 ICE_RESTORE_FUNC_CAP(max_mtu);
2638 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2639 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
2640 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
2641 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);
2642
	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;
2646
	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
2649 func_caps->guar_num_vsi = 1;
2650
	/* cache previous dev caps */
	cached_caps = dev_caps->common_cap;
2653 num_funcs = dev_caps->num_funcs;
2654
	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));
2657
2658#define ICE_RESTORE_DEV_CAP(name) \
2659 dev_caps->common_cap.name = cached_caps.name
2660
	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
2663 ICE_RESTORE_DEV_CAP(txq_first_id);
2664 ICE_RESTORE_DEV_CAP(rxq_first_id);
2665 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2666 ICE_RESTORE_DEV_CAP(max_mtu);
2667 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2668 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
2669 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
2670 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
2671 dev_caps->num_funcs = num_funcs;
2672
	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;
2676
	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2679}
2680
2681
2682
2683
2684
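/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 *
 * Discover the device capabilities first, then the function capabilities,
 * caching both in the HW structure.
 */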
2685int ice_get_caps(struct ice_hw *hw)
2686{
2687 int status;
2688
2689 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2690 if (status)
2691 return status;
2692
2693 return ice_discover_func_caps(hw, &hw->func_caps);
2694}
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
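/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written
 * @flags: flags to control the write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write a MAC address to the NVM.
 */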
2705int
2706ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2707 struct ice_sq_cd *cd)
2708{
2709 struct ice_aqc_manage_mac_write *cmd;
2710 struct ice_aq_desc desc;
2711
2712 cmd = &desc.params.mac_write;
2713 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2714
2715 cmd->flags = flags;
2716 ether_addr_copy(cmd->mac_addr, mac_addr);
2717
2718 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2719}
2720
2721
2722
2723
2724
2725
2726
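/**
 * ice_aq_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE.
 */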
2727static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
2728{
2729 struct ice_aq_desc desc;
2730
2731 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2732 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2733
2734 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2735}
2736
2737
2738
2739
2740
2741
2742
2743
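/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things like
 * descriptor fetch/write-back mode. Only sends the command if the admin
 * send queue is known to be alive.
 */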
2744void ice_clear_pxe_mode(struct ice_hw *hw)
2745{
2746 if (ice_check_sq_alive(hw, &hw->adminq))
2747 ice_aq_clear_pxe_mode(hw);
2748}
2749
2750
2751
2752
2753
2754
2755
2756
2757
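/**
 * ice_aq_set_port_params - set physical port parameters
 * @pi: pointer to the port info struct
 * @double_vlan: if set, double VLAN is enabled
 * @cd: pointer to command details structure or NULL
 */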
2758int
2759ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
2760 struct ice_sq_cd *cd)
{
2763 struct ice_aqc_set_port_params *cmd;
2764 struct ice_hw *hw = pi->hw;
2765 struct ice_aq_desc desc;
2766 u16 cmd_flags = 0;
2767
2768 cmd = &desc.params.set_port_params;
2769
2770 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2771 if (double_vlan)
2772 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2773 cmd->cmd_flags = cpu_to_le16(cmd_flags);
2774
2775 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2776}
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
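/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * Convert an entry in the PHY type structure [phy_type_low, phy_type_high]
 * to its corresponding link speed. Exactly one bit should be set across the
 * pair; if both halves (or neither) map to a speed, the speed is unknown.
 */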
2791static u16
2792ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2793{
2794 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2795 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2796
2797 switch (phy_type_low) {
2798 case ICE_PHY_TYPE_LOW_100BASE_TX:
2799 case ICE_PHY_TYPE_LOW_100M_SGMII:
2800 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2801 break;
2802 case ICE_PHY_TYPE_LOW_1000BASE_T:
2803 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2804 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2805 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2806 case ICE_PHY_TYPE_LOW_1G_SGMII:
2807 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2808 break;
2809 case ICE_PHY_TYPE_LOW_2500BASE_T:
2810 case ICE_PHY_TYPE_LOW_2500BASE_X:
2811 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2812 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2813 break;
2814 case ICE_PHY_TYPE_LOW_5GBASE_T:
2815 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2816 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2817 break;
2818 case ICE_PHY_TYPE_LOW_10GBASE_T:
2819 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2820 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2821 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2822 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2823 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2824 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2825 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2826 break;
2827 case ICE_PHY_TYPE_LOW_25GBASE_T:
2828 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2829 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2830 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2831 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2832 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2833 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2834 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2835 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2836 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2837 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2838 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2839 break;
2840 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2841 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2842 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2843 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2844 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2845 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2846 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2847 break;
2848 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2849 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2850 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2851 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2852 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2853 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2854 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2855 case ICE_PHY_TYPE_LOW_50G_AUI2:
2856 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2857 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2858 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2859 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2860 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2861 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2862 case ICE_PHY_TYPE_LOW_50G_AUI1:
2863 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2864 break;
2865 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2866 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2867 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2868 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2869 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2870 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2871 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2872 case ICE_PHY_TYPE_LOW_100G_AUI4:
2873 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2874 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2875 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2876 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2877 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2878 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2879 break;
2880 default:
2881 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2882 break;
2883 }
2884
2885 switch (phy_type_high) {
2886 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2887 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2888 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2889 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2890 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2891 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2892 break;
2893 default:
2894 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2895 break;
2896 }
2897
2898 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2899 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2900 return ICE_AQ_LINK_SPEED_UNKNOWN;
2901 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2902 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2903 return ICE_AQ_LINK_SPEED_UNKNOWN;
2904 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2905 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2906 return speed_phy_type_low;
2907 else
2908 return speed_phy_type_high;
2909}
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
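/**
 * ice_update_phy_type - update PHY type bitmaps from requested link speeds
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: ICE_AQ_LINK_SPEED_* bitmap of requested speeds
 *
 * For each bit in the low and high PHY type bitmaps, determine the link
 * speed it corresponds to and set that bit if the speed was requested.
 */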
2926void
2927ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2928 u16 link_speeds_bitmap)
2929{
2930 u64 pt_high;
2931 u64 pt_low;
2932 int index;
2933 u16 speed;
2934
2935
2936 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2937 pt_low = BIT_ULL(index);
2938 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2939
2940 if (link_speeds_bitmap & speed)
2941 *phy_type_low |= BIT_ULL(index);
2942 }
2943
2944
2945 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2946 pt_high = BIT_ULL(index);
2947 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2948
2949 if (link_speeds_bitmap & speed)
2950 *phy_type_high |= BIT_ULL(index);
2951 }
2952}
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
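/**
 * ice_aq_set_phy_cfg - set PHY configuration
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port. An
 * EMODE return from firmware is treated as success, and on success the
 * requested configuration is cached as the current user PHY config.
 */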
2966int
2967ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2968 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2969{
2970 struct ice_aq_desc desc;
2971 int status;
2972
2973 if (!cfg)
2974 return -EINVAL;
2975
2976
2977 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2978 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2979 cfg->caps);
2980
2981 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2982 }
2983
2984 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2985 desc.params.set_phy.lport_num = pi->lport;
2986 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2987
2988 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2989 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2990 (unsigned long long)le64_to_cpu(cfg->phy_type_low));
2991 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2992 (unsigned long long)le64_to_cpu(cfg->phy_type_high));
2993 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2994 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2995 cfg->low_power_ctrl_an);
2996 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2997 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2998 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2999 cfg->link_fec_opt);
3000
3001 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3002 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3003 status = 0;
3004
3005 if (!status)
3006 pi->phy.curr_user_phy_cfg = *cfg;
3007
3008 return status;
3009}
3010
3011
3012
3013
3014
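/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */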
3015int ice_update_link_info(struct ice_port_info *pi)
3016{
3017 struct ice_link_status *li;
3018 int status;
3019
3020 if (!pi)
3021 return -EINVAL;
3022
3023 li = &pi->phy.link_info;
3024
3025 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3026 if (status)
3027 return status;
3028
3029 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3030 struct ice_aqc_get_phy_caps_data *pcaps;
3031 struct ice_hw *hw;
3032
3033 hw = pi->hw;
3034 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
3035 GFP_KERNEL);
3036 if (!pcaps)
3037 return -ENOMEM;
3038
3039 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3040 pcaps, NULL);
3041
3042 devm_kfree(ice_hw_to_dev(hw), pcaps);
3043 }
3044
3045 return status;
3046}
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056static void
3057ice_cache_phy_user_req(struct ice_port_info *pi,
3058 struct ice_phy_cache_mode_data cache_data,
3059 enum ice_phy_cache_mode cache_mode)
3060{
3061 if (!pi)
3062 return;
3063
3064 switch (cache_mode) {
3065 case ICE_FC_MODE:
3066 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3067 break;
3068 case ICE_SPEED_MODE:
3069 pi->phy.curr_user_speed_req =
3070 cache_data.data.curr_user_speed_req;
3071 break;
3072 case ICE_FEC_MODE:
3073 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3074 break;
3075 default:
3076 break;
3077 }
3078}
3079
3080
3081
3082
3083
3084
3085
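/**
 * ice_caps_to_fc_mode - convert PHY pause capabilities to ice FC mode
 * @caps: PHY capabilities
 */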
3086enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3087{
3088 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3089 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3090 return ICE_FC_FULL;
3091
3092 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3093 return ICE_FC_TX_PAUSE;
3094
3095 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3096 return ICE_FC_RX_PAUSE;
3097
3098 return ICE_FC_NONE;
3099}
3100
3101
3102
3103
3104
3105
3106
3107
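/**
 * ice_caps_to_fec_mode - convert PHY FEC capabilities to ice FEC mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 */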
3108enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3109{
3110 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
3111 return ICE_FEC_AUTO;
3112
3113 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3114 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3115 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3116 ICE_AQC_PHY_FEC_25G_KR_REQ))
3117 return ICE_FEC_BASER;
3118
3119 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3120 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3121 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3122 return ICE_FEC_RS;
3123
3124 return ICE_FEC_NONE;
3125}
3126
3127
3128
3129
3130
3131
3132
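/**
 * ice_cfg_phy_fc - Configure PHY flow control data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 *
 * Clear the current pause bits in @cfg, apply the bits for @req_mode, and
 * cache the requested mode as the user flow control request.
 */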
3133int
3134ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3135 enum ice_fc_mode req_mode)
3136{
3137 struct ice_phy_cache_mode_data cache_data;
3138 u8 pause_mask = 0x0;
3139
3140 if (!pi || !cfg)
3141 return -EINVAL;
3142
3143 switch (req_mode) {
3144 case ICE_FC_FULL:
3145 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3146 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3147 break;
3148 case ICE_FC_RX_PAUSE:
3149 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3150 break;
3151 case ICE_FC_TX_PAUSE:
3152 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3153 break;
3154 default:
3155 break;
3156 }
3157
3158
3159 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3160 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3161
3162
3163 cfg->caps |= pause_mask;
3164
3165
3166 cache_data.data.curr_user_fc_req = req_mode;
3167 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3168
3169 return 0;
3170}
3171
3172
3173
3174
3175
3176
3177
3178
3179
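/**
 * ice_set_fc - Set the requested flow control mode
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Read the active PHY configuration, apply the requested flow control mode,
 * and write it back only if it changed, then poll for the updated link info.
 */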
3180int
3181ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3182{
3183 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3184 struct ice_aqc_get_phy_caps_data *pcaps;
3185 struct ice_hw *hw;
3186 int status;
3187
3188 if (!pi || !aq_failures)
3189 return -EINVAL;
3190
3191 *aq_failures = 0;
3192 hw = pi->hw;
3193
3194 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
3195 if (!pcaps)
3196 return -ENOMEM;
3197
3198
3199 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3200 pcaps, NULL);
3201 if (status) {
3202 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3203 goto out;
3204 }
3205
3206 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3207
3208
3209 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3210 if (status)
3211 goto out;
3212
3213
3214 if (cfg.caps != pcaps->caps) {
3215 int retry_count, retry_max = 10;
3216
3217
3218 if (ena_auto_link_update)
3219 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3220
3221 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3222 if (status) {
3223 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3224 goto out;
3225 }
3226
3227
3228
3229
3230
3231
3232 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3233 status = ice_update_link_info(pi);
3234
3235 if (!status)
3236 break;
3237
3238 mdelay(100);
3239 }
3240
3241 if (status)
3242 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3243 }
3244
3245out:
3246 devm_kfree(ice_hw_to_dev(hw), pcaps);
3247 return status;
3248}
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258bool
3259ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3260 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3261{
3262 u8 caps_mask, cfg_mask;
3263
3264 if (!phy_caps || !phy_cfg)
3265 return false;
3266
3267
3268
3269
3270 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3271 ICE_AQC_GET_PHY_EN_MOD_QUAL);
3272 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3273
3274 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3275 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3276 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3277 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3278 phy_caps->eee_cap != phy_cfg->eee_cap ||
3279 phy_caps->eeer_value != phy_cfg->eeer_value ||
3280 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3281 return false;
3282
3283 return true;
3284}
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295void
3296ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3297 struct ice_aqc_get_phy_caps_data *caps,
3298 struct ice_aqc_set_phy_cfg_data *cfg)
3299{
3300 if (!pi || !caps || !cfg)
3301 return;
3302
3303 memset(cfg, 0, sizeof(*cfg));
3304 cfg->phy_type_low = caps->phy_type_low;
3305 cfg->phy_type_high = caps->phy_type_high;
3306 cfg->caps = caps->caps;
3307 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3308 cfg->eee_cap = caps->eee_cap;
3309 cfg->eeer_value = caps->eeer_value;
3310 cfg->link_fec_opt = caps->link_fec_options;
3311 cfg->module_compliance_enforcement =
3312 caps->module_compliance_enforcement;
3313}
3314
3315
3316
3317
3318
3319
3320
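/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */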
3321int
3322ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3323 enum ice_fec_mode fec)
3324{
3325 struct ice_aqc_get_phy_caps_data *pcaps;
3326 struct ice_hw *hw;
3327 int status;
3328
3329 if (!pi || !cfg)
3330 return -EINVAL;
3331
3332 hw = pi->hw;
3333
3334 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3335 if (!pcaps)
3336 return -ENOMEM;
3337
3338 status = ice_aq_get_phy_caps(pi, false,
3339 (ice_fw_supports_report_dflt_cfg(hw) ?
3340 ICE_AQC_REPORT_DFLT_CFG :
3341 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3342 if (status)
3343 goto out;
3344
3345 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3346 cfg->link_fec_opt = pcaps->link_fec_options;
3347
3348 switch (fec) {
3349 case ICE_FEC_BASER:
3350
3351
3352
3353 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3354 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3355 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3356 ICE_AQC_PHY_FEC_25G_KR_REQ;
3357 break;
3358 case ICE_FEC_RS:
3359
3360
3361
3362 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3363 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3364 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3365 break;
3366 case ICE_FEC_NONE:
3367
3368 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3369 break;
3370 case ICE_FEC_AUTO:
3371
3372 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3373 cfg->link_fec_opt |= pcaps->link_fec_options;
3374 break;
3375 default:
3376 status = -EINVAL;
3377 break;
3378 }
3379
3380 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3381 !ice_fw_supports_report_dflt_cfg(hw)) {
3382 struct ice_link_default_override_tlv tlv = { 0 };
3383
3384 status = ice_get_link_default_override(&tlv, pi);
3385 if (status)
3386 goto out;
3387
3388 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3389 (tlv.options & ICE_LINK_OVERRIDE_EN))
3390 cfg->link_fec_opt = tlv.fec_options;
3391 }
3392
3393out:
3394 kfree(pcaps);
3395
3396 return status;
3397}
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
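/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = link up/link down)
 *
 * The value of link_up is only valid when status is zero. As a side effect
 * of this call, link status reporting becomes enabled.
 */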
3408int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3409{
3410 struct ice_phy_info *phy_info;
3411 int status = 0;
3412
3413 if (!pi || !link_up)
3414 return -EINVAL;
3415
3416 phy_info = &pi->phy;
3417
3418 if (phy_info->get_link_info) {
3419 status = ice_update_link_info(pi);
3420
3421 if (status)
3422 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3423 status);
3424 }
3425
3426 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3427
3428 return status;
3429}
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439int
3440ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3441 struct ice_sq_cd *cd)
3442{
3443 struct ice_aqc_restart_an *cmd;
3444 struct ice_aq_desc desc;
3445
3446 cmd = &desc.params.restart_an;
3447
3448 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3449
3450 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3451 cmd->lport_num = pi->lport;
3452 if (ena_link)
3453 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3454 else
3455 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3456
3457 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3458}
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469int
3470ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3471 struct ice_sq_cd *cd)
3472{
3473 struct ice_aqc_set_event_mask *cmd;
3474 struct ice_aq_desc desc;
3475
3476 cmd = &desc.params.set_event_mask;
3477
3478 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3479
3480 cmd->lport_num = port_num;
3481
3482 cmd->event_mask = cpu_to_le16(mask);
3483 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3484}
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494int
3495ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3496{
3497 struct ice_aqc_set_mac_lb *cmd;
3498 struct ice_aq_desc desc;
3499
3500 cmd = &desc.params.set_mac_lb;
3501
3502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3503 if (ena_lpbk)
3504 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3505
3506 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3507}
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517int
3518ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3519 struct ice_sq_cd *cd)
3520{
3521 struct ice_aqc_set_port_id_led *cmd;
3522 struct ice_hw *hw = pi->hw;
3523 struct ice_aq_desc desc;
3524
3525 cmd = &desc.params.set_port_id_led;
3526
3527 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3528
3529 if (is_orig_mode)
3530 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3531 else
3532 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3533
3534 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3535}
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
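/**
 * ice_aq_sff_eeprom - read/write SFF EEPROM
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the EEPROM
 * @mem_addr: I2C offset, lower 8 bits only
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device
 * @length: number of bytes to transfer
 * @write: false for read, true for write
 * @cd: pointer to command details structure or NULL
 */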
3552int
3553ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3554 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3555 bool write, struct ice_sq_cd *cd)
3556{
3557 struct ice_aqc_sff_eeprom *cmd;
3558 struct ice_aq_desc desc;
3559 int status;
3560
3561 if (!data || (mem_addr & 0xff00))
3562 return -EINVAL;
3563
3564 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3565 cmd = &desc.params.read_write_sff_param;
3566 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
3567 cmd->lport_num = (u8)(lport & 0xff);
3568 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3569 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
3570 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3571 ((set_page <<
3572 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3573 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3574 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
3575 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3576 if (write)
3577 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);
3578
3579 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3580 return status;
3581}
3582
3583
3584
3585
3586
3587
3588
3589
3590
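/**
 * __ice_aq_get_set_rss_lut - get or set the RSS lookup table
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal helper for getting or setting the RSS lookup table.
 */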
3591static int
3592__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3593{
3594 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3595 struct ice_aqc_get_set_rss_lut *cmd_resp;
3596 struct ice_aq_desc desc;
3597 int status;
3598 u8 *lut;
3599
3600 if (!params)
3601 return -EINVAL;
3602
3603 vsi_handle = params->vsi_handle;
3604 lut = params->lut;
3605
3606 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3607 return -EINVAL;
3608
3609 lut_size = params->lut_size;
3610 lut_type = params->lut_type;
3611 glob_lut_idx = params->global_lut_id;
3612 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3613
3614 cmd_resp = &desc.params.get_set_rss_lut;
3615
3616 if (set) {
3617 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3618 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3619 } else {
3620 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3621 }
3622
3623 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3624 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3625 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3626 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3627
3628 switch (lut_type) {
3629 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3630 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3631 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3632 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3633 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3634 break;
3635 default:
3636 status = -EINVAL;
3637 goto ice_aq_get_set_rss_lut_exit;
3638 }
3639
3640 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3641 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3642 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3643
3644 if (!set)
3645 goto ice_aq_get_set_rss_lut_send;
3646 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3647 if (!set)
3648 goto ice_aq_get_set_rss_lut_send;
3649 } else {
3650 goto ice_aq_get_set_rss_lut_send;
3651 }
3652
	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
3655 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3656 break;
3657 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3658 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3659 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3660 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3661 break;
3662 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3663 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3664 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3665 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3666 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3667 break;
3668 }
3669 fallthrough;
3670 default:
3671 status = -EINVAL;
3672 goto ice_aq_get_set_rss_lut_exit;
3673 }
3674
3675ice_aq_get_set_rss_lut_send:
3676 cmd_resp->flags = cpu_to_le16(flags);
3677 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3678
3679ice_aq_get_set_rss_lut_exit:
3680 return status;
3681}
3682
3683
3684
3685
3686
3687
3688
3689
3690int
3691ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3692{
3693 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3694}
3695
3696
3697
3698
3699
3700
3701
3702
3703int
3704ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3705{
3706 return __ice_aq_get_set_rss_lut(hw, set_params, true);
3707}
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718static int
3719__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3720 struct ice_aqc_get_set_rss_keys *key, bool set)
3721{
3722 struct ice_aqc_get_set_rss_key *cmd_resp;
3723 u16 key_size = sizeof(*key);
3724 struct ice_aq_desc desc;
3725
3726 cmd_resp = &desc.params.get_set_rss_key;
3727
3728 if (set) {
3729 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3730 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3731 } else {
3732 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3733 }
3734
3735 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3736 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3737 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3738 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3739
3740 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3741}
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751int
3752ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3753 struct ice_aqc_get_set_rss_keys *key)
3754{
3755 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3756 return -EINVAL;
3757
3758 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3759 key, false);
3760}
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770int
3771ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3772 struct ice_aqc_get_set_rss_keys *keys)
3773{
3774 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3775 return -EINVAL;
3776
3777 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3778 keys, true);
3779}
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
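/**
 * ice_aq_add_lan_txq - add Tx LAN queue groups
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add one or more Tx LAN queue groups. @buf_size must exactly match the
 * total size of the variable-length queue group entries in @qg_list.
 */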
3802static int
3803ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3804 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3805 struct ice_sq_cd *cd)
3806{
3807 struct ice_aqc_add_tx_qgrp *list;
3808 struct ice_aqc_add_txqs *cmd;
3809 struct ice_aq_desc desc;
3810 u16 i, sum_size = 0;
3811
3812 cmd = &desc.params.add_txqs;
3813
3814 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3815
3816 if (!qg_list)
3817 return -EINVAL;
3818
3819 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3820 return -EINVAL;
3821
3822 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3823 sum_size += struct_size(list, txqs, list->num_txqs);
3824 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3825 list->num_txqs);
3826 }
3827
3828 if (buf_size != sum_size)
3829 return -EINVAL;
3830
3831 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3832
3833 cmd->num_qgrps = num_qgrps;
3834
3835 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3836}
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
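/**
 * ice_aq_dis_lan_txq - disable Tx LAN queues
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 */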
3850static int
3851ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3852 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3853 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3854 struct ice_sq_cd *cd)
3855{
3856 struct ice_aqc_dis_txq_item *item;
3857 struct ice_aqc_dis_txqs *cmd;
3858 struct ice_aq_desc desc;
3859 u16 i, sz = 0;
3860 int status;
3861
3862 cmd = &desc.params.dis_txqs;
3863 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3864
	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;
3868
3869 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3870 return -EINVAL;
3871
3872 cmd->num_entries = num_qgrps;
3873
3874 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3875 ICE_AQC_Q_DIS_TIMEOUT_M);
3876
3877 switch (rst_src) {
3878 case ICE_VM_RESET:
3879 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3880 cmd->vmvf_and_timeout |=
3881 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3882 break;
3883 case ICE_VF_RESET:
3884 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3885
3886 cmd->vmvf_and_timeout |=
3887 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
3888 ICE_AQC_Q_DIS_VMVF_NUM_M);
3889 break;
3890 case ICE_NO_RESET:
3891 default:
3892 break;
3893 }
3894
3895
3896 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3897
3898 if (!qg_list)
3899 goto do_aq;
3900
	/* set RD bit to indicate that command buffer is provided by the
	 * driver and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3905
3906 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3907 u16 item_size = struct_size(item, q_id, item->num_qs);
3908
		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;
3912
3913 sz += item_size;
3914
3915 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3916 }
3917
3918 if (buf_size != sz)
3919 return -EINVAL;
3920
3921do_aq:
3922 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3923 if (status) {
3924 if (!qg_list)
3925 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3926 vmvf_num, hw->adminq.sq_last_status);
3927 else
3928 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3929 le16_to_cpu(qg_list[0].q_id[0]),
3930 hw->adminq.sq_last_status);
3931 }
3932 return status;
3933}
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945static int
3946ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
3947 struct ice_aqc_add_rdma_qset_data *qset_list,
3948 u16 buf_size, struct ice_sq_cd *cd)
3949{
3950 struct ice_aqc_add_rdma_qset_data *list;
3951 struct ice_aqc_add_rdma_qset *cmd;
3952 struct ice_aq_desc desc;
3953 u16 i, sum_size = 0;
3954
3955 cmd = &desc.params.add_rdma_qset;
3956
3957 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
3958
3959 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
3960 return -EINVAL;
3961
3962 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
3963 u16 num_qsets = le16_to_cpu(list->num_qsets);
3964
3965 sum_size += struct_size(list, rdma_qsets, num_qsets);
3966 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
3967 num_qsets);
3968 }
3969
3970 if (buf_size != sum_size)
3971 return -EINVAL;
3972
3973 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3974
3975 cmd->num_qset_grps = num_qset_grps;
3976
3977 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
3978}
3979
3980
3981
3982
3983
3984
3985
3986
3987
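/**
 * ice_write_byte - write a byte-sized field to a hardware context
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 *
 * Mask the source field to its bit width, shift it to the LSB position
 * described by @ce_info, and merge it into the destination byte.
 */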
3988static void
3989ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3990{
3991 u8 src_byte, dest_byte, mask;
3992 u8 *from, *dest;
3993 u16 shift_width;
3994
	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);
4011
4012 memcpy(&dest_byte, dest, sizeof(dest_byte));
4013
4014 dest_byte &= ~mask;
4015 dest_byte |= src_byte;
4016
4017
4018 memcpy(dest, &dest_byte, sizeof(dest_byte));
4019}
4020
4021
4022
4023
4024
4025
4026
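/**
 * ice_write_word - write a word-sized field to a hardware context
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 *
 * Same as ice_write_byte() but for 16-bit fields, converting the merged
 * word to little-endian before writing it back.
 */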
4027static void
4028ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4029{
4030 u16 src_word, mask;
4031 __le16 dest_word;
4032 u8 *from, *dest;
4033 u16 shift_width;
4034
4035
4036 from = src_ctx + ce_info->offset;
4037
4038
4039 shift_width = ce_info->lsb % 8;
4040 mask = BIT(ce_info->width) - 1;
4041
4042
4043
4044
4045 src_word = *(u16 *)from;
4046 src_word &= mask;
4047
4048
4049 mask <<= shift_width;
4050 src_word <<= shift_width;
4051
4052
4053 dest = dest_ctx + (ce_info->lsb / 8);
4054
4055 memcpy(&dest_word, dest, sizeof(dest_word));
4056
4057 dest_word &= ~(cpu_to_le16(mask));
4058 dest_word |= cpu_to_le16(src_word);
4059
4060
4061 memcpy(dest, &dest_word, sizeof(dest_word));
4062}
4063
4064
4065
4066
4067
4068
4069
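/**
 * ice_write_dword - write a dword-sized field to a hardware context
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 *
 * Same as ice_write_word() but for 32-bit fields. A full-width field needs
 * a special mask, since shifting a 32-bit value by 32 is undefined.
 */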
4070static void
4071ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4072{
4073 u32 src_dword, mask;
4074 __le32 dest_dword;
4075 u8 *from, *dest;
4076 u16 shift_width;
4077
4078
4079 from = src_ctx + ce_info->offset;
4080
4081
4082 shift_width = ce_info->lsb % 8;
4083
4084
4085
4086
4087
4088 if (ce_info->width < 32)
4089 mask = BIT(ce_info->width) - 1;
4090 else
4091 mask = (u32)~0;
4092
4093
4094
4095
4096 src_dword = *(u32 *)from;
4097 src_dword &= mask;
4098
4099
4100 mask <<= shift_width;
4101 src_dword <<= shift_width;
4102
4103
4104 dest = dest_ctx + (ce_info->lsb / 8);
4105
4106 memcpy(&dest_dword, dest, sizeof(dest_dword));
4107
4108 dest_dword &= ~(cpu_to_le32(mask));
4109 dest_dword |= cpu_to_le32(src_dword);
4110
4111
4112 memcpy(dest, &dest_dword, sizeof(dest_dword));
4113}
4114
4115
4116
4117
4118
4119
4120
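/**
 * ice_write_qword - write a qword-sized field to a hardware context
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 *
 * Same as ice_write_dword() but for 64-bit fields. A full-width field needs
 * a special mask, since shifting a 64-bit value by 64 is undefined.
 */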
4121static void
4122ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4123{
4124 u64 src_qword, mask;
4125 __le64 dest_qword;
4126 u8 *from, *dest;
4127 u16 shift_width;
4128
4129
4130 from = src_ctx + ce_info->offset;
4131
4132
4133 shift_width = ce_info->lsb % 8;
4134
4135
4136
4137
4138
4139 if (ce_info->width < 64)
4140 mask = BIT_ULL(ce_info->width) - 1;
4141 else
4142 mask = (u64)~0;
4143
4144
4145
4146
4147 src_qword = *(u64 *)from;
4148 src_qword &= mask;
4149
4150
4151 mask <<= shift_width;
4152 src_qword <<= shift_width;
4153
4154
4155 dest = dest_ctx + (ce_info->lsb / 8);
4156
4157 memcpy(&dest_qword, dest, sizeof(dest_qword));
4158
4159 dest_qword &= ~(cpu_to_le64(mask));
4160 dest_qword |= cpu_to_le64(src_qword);
4161
4162
4163 memcpy(dest, &dest_qword, sizeof(dest_qword));
4164}
4165
4166
4167
4168
4169
4170
4171
4172
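/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: field descriptions for the packed structure
 *
 * Pack each field described in @ce_info from the unpacked source structure
 * into the packed destination buffer, dispatching on the field size.
 */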
4173int
4174ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4175 const struct ice_ctx_ele *ce_info)
4176{
4177 int f;
4178
4179 for (f = 0; ce_info[f].width; f++) {
4180
4181
4182
4183
4184 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4185 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4186 f, ce_info[f].width, ce_info[f].size_of);
4187 continue;
4188 }
4189 switch (ce_info[f].size_of) {
4190 case sizeof(u8):
4191 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4192 break;
4193 case sizeof(u16):
4194 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4195 break;
4196 case sizeof(u32):
4197 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4198 break;
4199 case sizeof(u64):
4200 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4201 break;
4202 default:
4203 return -EINVAL;
4204 }
4205 }
4206
4207 return 0;
4208}
4209
4210
4211
4212
4213
4214
4215
4216
4217struct ice_q_ctx *
4218ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4219{
4220 struct ice_vsi_ctx *vsi;
4221 struct ice_q_ctx *q_ctx;
4222
4223 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4224 if (!vsi)
4225 return NULL;
4226 if (q_handle >= vsi->num_lan_q_entries[tc])
4227 return NULL;
4228 if (!vsi->lan_q_ctx[tc])
4229 return NULL;
4230 q_ctx = vsi->lan_q_ctx[tc];
4231 return &q_ctx[q_handle];
4232}
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
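/**
 * ice_ena_vsi_txq - enable a Tx LAN queue
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue.
 */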
4247int
4248ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4249 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4250 struct ice_sq_cd *cd)
4251{
4252 struct ice_aqc_txsched_elem_data node = { 0 };
4253 struct ice_sched_node *parent;
4254 struct ice_q_ctx *q_ctx;
4255 struct ice_hw *hw;
4256 int status;
4257
4258 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4259 return -EIO;
4260
4261 if (num_qgrps > 1 || buf->num_txqs > 1)
4262 return -ENOSPC;
4263
4264 hw = pi->hw;
4265
4266 if (!ice_is_vsi_valid(hw, vsi_handle))
4267 return -EINVAL;
4268
4269 mutex_lock(&pi->sched_lock);
4270
4271 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4272 if (!q_ctx) {
4273 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4274 q_handle);
4275 status = -EINVAL;
4276 goto ena_txq_exit;
4277 }
4278
4279
4280 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4281 ICE_SCHED_NODE_OWNER_LAN);
4282 if (!parent) {
4283 status = -EINVAL;
4284 goto ena_txq_exit;
4285 }
4286
4287 buf->parent_teid = parent->info.node_teid;
4288 node.parent_teid = parent->info.node_teid;
4289
	/* Mark the "generic", CIR and EIR sections of the element as valid,
	 * and use the default scheduler profile IDs and bandwidth weights so
	 * the queue starts with no dedicated or exclusive bandwidth.
	 */
	buf->txqs[0].info.valid_sections =
4301 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4302 ICE_AQC_ELEM_VALID_EIR;
4303 buf->txqs[0].info.generic = 0;
4304 buf->txqs[0].info.cir_bw.bw_profile_idx =
4305 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4306 buf->txqs[0].info.cir_bw.bw_alloc =
4307 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4308 buf->txqs[0].info.eir_bw.bw_profile_idx =
4309 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4310 buf->txqs[0].info.eir_bw.bw_alloc =
4311 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4312
4313
4314 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4315 if (status) {
4316 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4317 le16_to_cpu(buf->txqs[0].txq_id),
4318 hw->adminq.sq_last_status);
4319 goto ena_txq_exit;
4320 }
4321
4322 node.node_teid = buf->txqs[0].q_teid;
4323 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4324 q_ctx->q_handle = q_handle;
4325 q_ctx->q_teid = le32_to_cpu(node.node_teid);
4326
4327
4328 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4329 if (!status)
4330 status = ice_sched_replay_q_bw(pi, q_ctx);
4331
4332ena_txq_exit:
4333 mutex_unlock(&pi->sched_lock);
4334 return status;
4335}
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
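/**
 * ice_dis_vsi_txq - disable LAN Tx queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB.
 */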
4352int
4353ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4354 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4355 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4356 struct ice_sq_cd *cd)
4357{
4358 struct ice_aqc_dis_txq_item *qg_list;
4359 struct ice_q_ctx *q_ctx;
4360 int status = -ENOENT;
4361 struct ice_hw *hw;
4362 u16 i, buf_size;
4363
4364 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4365 return -EIO;
4366
4367 hw = pi->hw;
4368
4369 if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
4374 if (rst_src)
4375 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4376 vmvf_num, NULL);
4377 return -EIO;
4378 }
4379
4380 buf_size = struct_size(qg_list, q_id, 1);
4381 qg_list = kzalloc(buf_size, GFP_KERNEL);
4382 if (!qg_list)
4383 return -ENOMEM;
4384
4385 mutex_lock(&pi->sched_lock);
4386
4387 for (i = 0; i < num_queues; i++) {
4388 struct ice_sched_node *node;
4389
4390 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4391 if (!node)
4392 continue;
4393 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4394 if (!q_ctx) {
4395 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4396 q_handles[i]);
4397 continue;
4398 }
4399 if (q_ctx->q_handle != q_handles[i]) {
4400 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4401 q_ctx->q_handle, q_handles[i]);
4402 continue;
4403 }
4404 qg_list->parent_teid = node->info.parent_teid;
4405 qg_list->num_qs = 1;
4406 qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
4407 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4408 vmvf_num, cd);
4409
4410 if (status)
4411 break;
4412 ice_free_sched_node(pi, node);
4413 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4414 }
4415 mutex_unlock(&pi->sched_lock);
4416 kfree(qg_list);
4417 return status;
4418}
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430static int
4431ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4432 u16 *maxqs, u8 owner)
4433{
4434 int status = 0;
4435 u8 i;
4436
4437 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4438 return -EIO;
4439
4440 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4441 return -EINVAL;
4442
4443 mutex_lock(&pi->sched_lock);
4444
4445 ice_for_each_traffic_class(i) {
4446
4447 if (!ice_sched_get_tc_node(pi, i))
4448 continue;
4449
4450 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4451 ice_is_tc_ena(tc_bitmap, i));
4452 if (status)
4453 break;
4454 }
4455
4456 mutex_unlock(&pi->sched_lock);
4457 return status;
4458}
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469int
4470ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4471 u16 *max_lanqs)
4472{
4473 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4474 ICE_SCHED_NODE_OWNER_LAN);
4475}
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486int
4487ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4488 u16 *max_rdmaqs)
4489{
4490 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
4491 ICE_SCHED_NODE_OWNER_RDMA);
4492}
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505int
4506ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4507 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
4508{
4509 struct ice_aqc_txsched_elem_data node = { 0 };
4510 struct ice_aqc_add_rdma_qset_data *buf;
4511 struct ice_sched_node *parent;
4512 struct ice_hw *hw;
4513 u16 i, buf_size;
4514 int ret;
4515
4516 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4517 return -EIO;
4518 hw = pi->hw;
4519
4520 if (!ice_is_vsi_valid(hw, vsi_handle))
4521 return -EINVAL;
4522
4523 buf_size = struct_size(buf, rdma_qsets, num_qsets);
4524 buf = kzalloc(buf_size, GFP_KERNEL);
4525 if (!buf)
4526 return -ENOMEM;
4527 mutex_lock(&pi->sched_lock);
4528
4529 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4530 ICE_SCHED_NODE_OWNER_RDMA);
4531 if (!parent) {
4532 ret = -EINVAL;
4533 goto rdma_error_exit;
4534 }
4535 buf->parent_teid = parent->info.node_teid;
4536 node.parent_teid = parent->info.node_teid;
4537
4538 buf->num_qsets = cpu_to_le16(num_qsets);
4539 for (i = 0; i < num_qsets; i++) {
4540 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
4541 buf->rdma_qsets[i].info.valid_sections =
4542 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4543 ICE_AQC_ELEM_VALID_EIR;
4544 buf->rdma_qsets[i].info.generic = 0;
4545 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
4546 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4547 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
4548 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4549 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
4550 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
4551 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
4552 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
4553 }
4554 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
4555 if (ret) {
4556 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
4557 goto rdma_error_exit;
4558 }
4559 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4560 for (i = 0; i < num_qsets; i++) {
4561 node.node_teid = buf->rdma_qsets[i].qset_teid;
4562 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
4563 &node);
4564 if (ret)
4565 break;
4566 qset_teid[i] = le32_to_cpu(node.node_teid);
4567 }
4568rdma_error_exit:
4569 mutex_unlock(&pi->sched_lock);
4570 kfree(buf);
4571 return ret;
4572}
4573
4574
4575
4576
4577
4578
4579
4580
4581int
4582ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
4583 u16 *q_id)
4584{
4585 struct ice_aqc_dis_txq_item *qg_list;
4586 struct ice_hw *hw;
4587 int status = 0;
4588 u16 qg_size;
4589 int i;
4590
4591 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4592 return -EIO;
4593
4594 hw = pi->hw;
4595
4596 qg_size = struct_size(qg_list, q_id, 1);
4597 qg_list = kzalloc(qg_size, GFP_KERNEL);
4598 if (!qg_list)
4599 return -ENOMEM;
4600
4601 mutex_lock(&pi->sched_lock);
4602
4603 for (i = 0; i < count; i++) {
4604 struct ice_sched_node *node;
4605
4606 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
4607 if (!node)
4608 continue;
4609
4610 qg_list->parent_teid = node->info.parent_teid;
4611 qg_list->num_qs = 1;
4612 qg_list->q_id[0] =
4613 cpu_to_le16(q_id[i] |
4614 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
4615
4616 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
4617 ICE_NO_RESET, 0, NULL);
4618 if (status)
4619 break;
4620
4621 ice_free_sched_node(pi, node);
4622 }
4623
4624 mutex_unlock(&pi->sched_lock);
4625 kfree(qg_list);
4626 return status;
4627}
4628
4629
4630
4631
4632
4633
4634
4635static int ice_replay_pre_init(struct ice_hw *hw)
4636{
4637 struct ice_switch_info *sw = hw->switch_info;
4638 u8 i;
4639
	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
4642
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
4649 ice_sched_replay_agg_vsi_preinit(hw);
4650
4651 return 0;
4652}
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4663{
4664 int status;
4665
4666 if (!ice_is_vsi_valid(hw, vsi_handle))
4667 return -EINVAL;
4668
4669
4670 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4671 status = ice_replay_pre_init(hw);
4672 if (status)
4673 return status;
4674 }
4675
4676 status = ice_replay_rss_cfg(hw, vsi_handle);
4677 if (status)
4678 return status;
4679
4680 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4681 if (!status)
4682 status = ice_replay_vsi_agg(hw, vsi_handle);
4683 return status;
4684}
4685
4686
4687
4688
4689
4690
4691
4692void ice_replay_post(struct ice_hw *hw)
4693{
4694
4695 ice_rm_all_sw_replay_rule_info(hw);
4696 ice_sched_replay_agg(hw);
4697}
4698
4699
4700
4701
4702
4703
4704
4705
4706
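/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */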
4707void
4708ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4709 u64 *prev_stat, u64 *cur_stat)
4710{
4711 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4712
	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
4719 *prev_stat = new_data;
4720 return;
4721 }
	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
4727 *cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4731
4732
4733 *prev_stat = new_data;
4734}
4735
4736
4737
4738
4739
4740
4741
4742
4743
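/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */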
4744void
4745ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4746 u64 *prev_stat, u64 *cur_stat)
4747{
4748 u32 new_data;
4749
4750 new_data = rd32(hw, reg);
4751
	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
4758 *prev_stat = new_data;
4759 return;
4760 }
4761
	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
4766 *cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4770
4771
4772 *prev_stat = new_data;
4773}
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783int
4784ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4785 struct ice_aqc_txsched_elem_data *buf)
4786{
4787 u16 buf_size, num_elem_ret = 0;
4788 int status;
4789
4790 buf_size = sizeof(*buf);
4791 memset(buf, 0, buf_size);
4792 buf->node_teid = cpu_to_le32(node_teid);
4793 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4794 NULL);
4795 if (status || num_elem_ret != 1)
4796 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4797 return status;
4798}
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815int
4816ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
4817 u16 bus_addr, __le16 addr, u8 params, u8 *data,
4818 struct ice_sq_cd *cd)
4819{
4820 struct ice_aq_desc desc = { 0 };
4821 struct ice_aqc_i2c *cmd;
4822 u8 data_size;
4823 int status;
4824
4825 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
4826 cmd = &desc.params.read_i2c;
4827
4828 if (!data)
4829 return -EINVAL;
4830
4831 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
4832
4833 cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
4834 cmd->topo_addr = topo_addr;
4835 cmd->i2c_params = params;
4836 cmd->i2c_addr = addr;
4837
4838 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4839 if (!status) {
4840 struct ice_aqc_read_i2c_resp *resp;
4841 u8 i;
4842
4843 resp = &desc.params.read_i2c_resp;
4844 for (i = 0; i < data_size; i++) {
4845 *data = resp->i2c_data[i];
4846 data++;
4847 }
4848 }
4849
4850 return status;
4851}
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868int
4869ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4870 u32 value, struct ice_sq_cd *cd)
4871{
4872 struct ice_aqc_driver_shared_params *cmd;
4873 struct ice_aq_desc desc;
4874
4875 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4876 return -EIO;
4877
4878 cmd = &desc.params.drv_shared_params;
4879
4880 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4881
4882 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
4883 cmd->param_indx = idx;
4884 cmd->param_val = cpu_to_le32(value);
4885
4886 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4887}
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901int
4902ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
4903 u32 *value, struct ice_sq_cd *cd)
4904{
4905 struct ice_aqc_driver_shared_params *cmd;
4906 struct ice_aq_desc desc;
4907 int status;
4908
4909 if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
4910 return -EIO;
4911
4912 cmd = &desc.params.drv_shared_params;
4913
4914 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
4915
4916 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
4917 cmd->param_indx = idx;
4918
4919 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4920 if (status)
4921 return status;
4922
4923 *value = le32_to_cpu(cmd->param_val);
4924
4925 return 0;
4926}
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938int
4939ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
4940 struct ice_sq_cd *cd)
4941{
4942 struct ice_aqc_gpio *cmd;
4943 struct ice_aq_desc desc;
4944
4945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
4946 cmd = &desc.params.read_write_gpio;
4947 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4948 cmd->gpio_num = pin_idx;
4949 cmd->gpio_val = value ? 1 : 0;
4950
4951 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4952}
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965int
4966ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
4967 bool *value, struct ice_sq_cd *cd)
4968{
4969 struct ice_aqc_gpio *cmd;
4970 struct ice_aq_desc desc;
4971 int status;
4972
4973 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
4974 cmd = &desc.params.read_write_gpio;
4975 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
4976 cmd->gpio_num = pin_idx;
4977
4978 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4979 if (status)
4980 return status;
4981
4982 *value = !!cmd->gpio_val;
4983 return 0;
4984}
4985
4986
4987
4988
4989
4990
4991
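/**
 * ice_fw_supports_link_override - checks if firmware supports link override
 * @hw: pointer to the hardware structure
 *
 * Check the firmware API version to determine whether the link default
 * override feature is supported.
 */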
4992bool ice_fw_supports_link_override(struct ice_hw *hw)
4993{
4994 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4995 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4996 return true;
4997 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4998 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4999 return true;
5000 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5001 return true;
5002 }
5003
5004 return false;
5005}

/**
 * ice_get_link_default_override - read the link default override TLV
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY types low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY types high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
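
/* Illustrative usage sketch (not part of the upstream driver): read the
 * link default override for a port, gated on firmware support. The TLV
 * struct must start zeroed because ice_get_link_default_override() ORs the
 * PHY type words into it rather than assigning them.
 */
static int __maybe_unused
ice_example_read_ldo(struct ice_port_info *pi)
{
	struct ice_link_default_override_tlv ldo = {};
	int status;

	if (!ice_fw_supports_link_override(pi->hw))
		return -EOPNOTSUPP;

	status = ice_get_link_default_override(&ldo, pi);
	if (status)
		return status;

	/* ldo.options, ldo.fec_options and ldo.phy_type_low/high now hold
	 * the port's override configuration from the PFA TLV
	 */
	return 0;
}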

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 *
 * Returns true if any of the autoneg modes (clause 28, 37 or 73) is enabled
 * in the reported capabilities.
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
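
/* Illustrative usage sketch (not part of the upstream driver): fetch the
 * active PHY configuration and test whether autoneg is on. This assumes
 * the ICE_AQC_REPORT_ACTIVE_CFG report mode and that <linux/slab.h> is
 * available through the ice headers; both hold for the driver as written,
 * but treat them as assumptions here.
 */
static int __maybe_unused
ice_example_an_enabled(struct ice_port_info *pi, bool *an)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	int status;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (!status)
		*an = ice_is_phy_caps_an_enabled(pcaps);

	kfree(pcaps);
	return status;
}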

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer storing the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
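
/* Illustrative usage sketch (not part of the upstream driver): push a
 * caller-built local MIB blob to firmware. The buffer must already contain
 * the LLDP TLV chain; the mib_type value 0 selecting the local MIB is an
 * assumption based on the AQ spec, so verify it against the adminq header
 * before reuse.
 */
static int __maybe_unused
ice_example_push_local_mib(struct ice_hw *hw, u8 *lldpmib, u16 miblen)
{
	if (!lldpmib || !miblen)
		return -EINVAL;

	return ice_aq_set_lldp_mib(hw, 0 /* local MIB, assumed */, lldpmib,
				   miblen, NULL);
}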

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 *
 * Only E810 devices support the LLDP filter control AQ command.
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
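
/* Illustrative usage sketch (not part of the upstream driver): redirect
 * LLDP frames to a specific VSI, but only on E810 firmware that implements
 * the filter control command. vsi_num must be the absolute HW VSI index.
 */
static int __maybe_unused
ice_example_lldp_redirect(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}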

/**
 * ice_fw_supports_report_dflt_cfg - check if firmware supports report default
 *                                   configuration
 * @hw: pointer to the hardware structure
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}
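
/* Illustrative usage sketch (not part of the upstream driver): choose a
 * report mode for ice_aq_get_phy_caps() based on firmware support, the
 * pattern callers typically follow. ICE_AQC_REPORT_DFLT_CFG and
 * ICE_AQC_REPORT_TOPO_CAP_MEDIA are assumed to be the two candidate modes
 * in this driver version.
 */
static u8 __maybe_unused ice_example_phy_report_mode(struct ice_hw *hw)
{
	return ice_fw_supports_report_dflt_cfg(hw) ?
	       ICE_AQC_REPORT_DFLT_CFG : ICE_AQC_REPORT_TOPO_CAP_MEDIA;
}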