// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7#include "ice_flow.h"
8
9#define ICE_PF_RESET_WAIT_COUNT 300
10
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
18static int ice_set_mac_type(struct ice_hw *hw)
19{
20 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 return -ENODEV;
22
23 switch (hw->device_id) {
24 case ICE_DEV_ID_E810C_BACKPLANE:
25 case ICE_DEV_ID_E810C_QSFP:
26 case ICE_DEV_ID_E810C_SFP:
27 case ICE_DEV_ID_E810_XXV_BACKPLANE:
28 case ICE_DEV_ID_E810_XXV_QSFP:
29 case ICE_DEV_ID_E810_XXV_SFP:
30 hw->mac_type = ICE_MAC_E810;
31 break;
32 case ICE_DEV_ID_E823C_10G_BASE_T:
33 case ICE_DEV_ID_E823C_BACKPLANE:
34 case ICE_DEV_ID_E823C_QSFP:
35 case ICE_DEV_ID_E823C_SFP:
36 case ICE_DEV_ID_E823C_SGMII:
37 case ICE_DEV_ID_E822C_10G_BASE_T:
38 case ICE_DEV_ID_E822C_BACKPLANE:
39 case ICE_DEV_ID_E822C_QSFP:
40 case ICE_DEV_ID_E822C_SFP:
41 case ICE_DEV_ID_E822C_SGMII:
42 case ICE_DEV_ID_E822L_10G_BASE_T:
43 case ICE_DEV_ID_E822L_BACKPLANE:
44 case ICE_DEV_ID_E822L_SFP:
45 case ICE_DEV_ID_E822L_SGMII:
46 case ICE_DEV_ID_E823L_10G_BASE_T:
47 case ICE_DEV_ID_E823L_1GBE:
48 case ICE_DEV_ID_E823L_BACKPLANE:
49 case ICE_DEV_ID_E823L_QSFP:
50 case ICE_DEV_ID_E823L_SFP:
51 hw->mac_type = ICE_MAC_GENERIC;
52 break;
53 default:
54 hw->mac_type = ICE_MAC_UNKNOWN;
55 break;
56 }
57
58 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
59 return 0;
60}
61
/**
 * ice_is_e810 - determine if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
68bool ice_is_e810(struct ice_hw *hw)
69{
70 return hw->mac_type == ICE_MAC_E810;
71}
72
/**
 * ice_is_e810t - check if a device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Returns true when the device is E810T based, false if not.
 */
79bool ice_is_e810t(struct ice_hw *hw)
80{
81 switch (hw->device_id) {
82 case ICE_DEV_ID_E810C_SFP:
83 if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
84 hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
85 return true;
86 break;
87 default:
88 break;
89 }
90
91 return false;
92}
93
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
101int ice_clear_pf_cfg(struct ice_hw *hw)
102{
103 struct ice_aq_desc desc;
104
105 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
106
107 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
108}
109
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * The LAN address reported in the response is also saved as the port's
 * permanent and current MAC address.
 */
125static int
126ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
127 struct ice_sq_cd *cd)
128{
129 struct ice_aqc_manage_mac_read_resp *resp;
130 struct ice_aqc_manage_mac_read *cmd;
131 struct ice_aq_desc desc;
132 int status;
133 u16 flags;
134 u8 i;
135
136 cmd = &desc.params.mac_read;
137
138 if (buf_size < sizeof(*resp))
139 return -EINVAL;
140
141 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
142
143 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
144 if (status)
145 return status;
146
147 resp = buf;
148 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
149
150 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
151 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
152 return -EIO;
153 }
154
155
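 /* A single port can report up to two (LAN and WoL) addresses */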
156 for (i = 0; i < cmd->num_addr; i++)
157 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
158 ether_addr_copy(hw->port_info->mac.lan_addr,
159 resp[i].mac_addr);
160 ether_addr_copy(hw->port_info->mac.perm_addr,
161 resp[i].mac_addr);
162 break;
163 }
164
165 return 0;
166}
167
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
178int
179ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
180 struct ice_aqc_get_phy_caps_data *pcaps,
181 struct ice_sq_cd *cd)
182{
183 struct ice_aqc_get_phy_caps *cmd;
184 u16 pcaps_size = sizeof(*pcaps);
185 struct ice_aq_desc desc;
186 struct ice_hw *hw;
187 int status;
188
189 cmd = &desc.params.get_phy;
190
191 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
192 return -EINVAL;
193 hw = pi->hw;
194
195 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
196 !ice_fw_supports_report_dflt_cfg(hw))
197 return -EINVAL;
198
199 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
200
201 if (qual_mods)
202 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
203
204 cmd->param0 |= cpu_to_le16(report_mode);
205 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
206
207 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
208 report_mode);
209 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
210 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
211 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
212 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
213 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
214 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
215 pcaps->low_power_ctrl_an);
216 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
217 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
218 pcaps->eeer_value);
219 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
220 pcaps->link_fec_options);
221 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
222 pcaps->module_compliance_enforcement);
223 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
224 pcaps->extended_compliance_code);
225 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
226 pcaps->module_type[0]);
227 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
228 pcaps->module_type[1]);
229 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
230 pcaps->module_type[2]);
231
232 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
233 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
234 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
235 memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
236 sizeof(pi->phy.link_info.module_type));
237 }
238
239 return status;
240}
241
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
254static int
255ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
256 struct ice_sq_cd *cd)
257{
258 struct ice_aqc_get_link_topo *cmd;
259 struct ice_aq_desc desc;
260
261 cmd = &desc.params.get_link_topo;
262
263 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
264
265 cmd->addr.topo_params.node_type_ctx =
266 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
267 ICE_AQC_LINK_TOPO_NODE_CTX_S);
268
269
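 /* set node type */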
270 cmd->addr.topo_params.node_type_ctx |=
271 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
272
273 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
274}
275
/**
 * ice_is_media_cage_present - check if media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
283static bool ice_is_media_cage_present(struct ice_port_info *pi)
284{
 /* Node type cage can be used to determine if cage is present. If AQC
  * returns error (ENOENT), then no cage present. If no cage present, then
  * connection type is backplane or BASE-T.
  */
289 return !ice_aq_get_link_topo_handle(pi,
290 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
291 NULL);
292}
293
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
298static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
299{
300 struct ice_link_status *hw_link_info;
301
302 if (!pi)
303 return ICE_MEDIA_UNKNOWN;
304
305 hw_link_info = &pi->phy.link_info;
306 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
 /* If more than one media type is selected, report unknown */
308 return ICE_MEDIA_UNKNOWN;
309
310 if (hw_link_info->phy_type_low) {
 /* 1G SGMII is a special case where some DA cable PHYs
  * may show this as an option when it really shouldn't
  * be since SGMII is meant to be between a MAC and a PHY
  * in a backplane. Try to detect this case and handle it
  */
316 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
317 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
318 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
319 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
320 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
321 return ICE_MEDIA_DA;
322
323 switch (hw_link_info->phy_type_low) {
324 case ICE_PHY_TYPE_LOW_1000BASE_SX:
325 case ICE_PHY_TYPE_LOW_1000BASE_LX:
326 case ICE_PHY_TYPE_LOW_10GBASE_SR:
327 case ICE_PHY_TYPE_LOW_10GBASE_LR:
328 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
329 case ICE_PHY_TYPE_LOW_25GBASE_SR:
330 case ICE_PHY_TYPE_LOW_25GBASE_LR:
331 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
332 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
333 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
334 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
335 case ICE_PHY_TYPE_LOW_50GBASE_SR:
336 case ICE_PHY_TYPE_LOW_50GBASE_FR:
337 case ICE_PHY_TYPE_LOW_50GBASE_LR:
338 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
339 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
340 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
341 case ICE_PHY_TYPE_LOW_100GBASE_DR:
342 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
343 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
344 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
345 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
346 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
347 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
348 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
349 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
350 return ICE_MEDIA_FIBER;
351 case ICE_PHY_TYPE_LOW_100BASE_TX:
352 case ICE_PHY_TYPE_LOW_1000BASE_T:
353 case ICE_PHY_TYPE_LOW_2500BASE_T:
354 case ICE_PHY_TYPE_LOW_5GBASE_T:
355 case ICE_PHY_TYPE_LOW_10GBASE_T:
356 case ICE_PHY_TYPE_LOW_25GBASE_T:
357 return ICE_MEDIA_BASET;
358 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
359 case ICE_PHY_TYPE_LOW_25GBASE_CR:
360 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
361 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
362 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
363 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
364 case ICE_PHY_TYPE_LOW_50GBASE_CP:
365 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
366 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
367 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
368 return ICE_MEDIA_DA;
369 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
370 case ICE_PHY_TYPE_LOW_40G_XLAUI:
371 case ICE_PHY_TYPE_LOW_50G_LAUI2:
372 case ICE_PHY_TYPE_LOW_50G_AUI2:
373 case ICE_PHY_TYPE_LOW_50G_AUI1:
374 case ICE_PHY_TYPE_LOW_100G_AUI4:
375 case ICE_PHY_TYPE_LOW_100G_CAUI4:
376 if (ice_is_media_cage_present(pi))
377 return ICE_MEDIA_DA;
378 fallthrough;
379 case ICE_PHY_TYPE_LOW_1000BASE_KX:
380 case ICE_PHY_TYPE_LOW_2500BASE_KX:
381 case ICE_PHY_TYPE_LOW_2500BASE_X:
382 case ICE_PHY_TYPE_LOW_5GBASE_KR:
383 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
384 case ICE_PHY_TYPE_LOW_25GBASE_KR:
385 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
386 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
387 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
388 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
389 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
390 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
391 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
392 return ICE_MEDIA_BACKPLANE;
393 }
394 } else {
395 switch (hw_link_info->phy_type_high) {
396 case ICE_PHY_TYPE_HIGH_100G_AUI2:
397 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
398 if (ice_is_media_cage_present(pi))
399 return ICE_MEDIA_DA;
400 fallthrough;
401 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
402 return ICE_MEDIA_BACKPLANE;
403 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
404 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
405 return ICE_MEDIA_FIBER;
406 }
407 }
408 return ICE_MEDIA_UNKNOWN;
409}
410
/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
420int
421ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
422 struct ice_link_status *link, struct ice_sq_cd *cd)
423{
424 struct ice_aqc_get_link_status_data link_data = { 0 };
425 struct ice_aqc_get_link_status *resp;
426 struct ice_link_status *li_old, *li;
427 enum ice_media_type *hw_media_type;
428 struct ice_fc_info *hw_fc_info;
429 bool tx_pause, rx_pause;
430 struct ice_aq_desc desc;
431 struct ice_hw *hw;
432 u16 cmd_flags;
433 int status;
434
435 if (!pi)
436 return -EINVAL;
437 hw = pi->hw;
438 li_old = &pi->phy.link_info_old;
439 hw_media_type = &pi->phy.media_type;
440 li = &pi->phy.link_info;
441 hw_fc_info = &pi->fc;
442
443 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
444 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
445 resp = &desc.params.get_link_status;
446 resp->cmd_flags = cpu_to_le16(cmd_flags);
447 resp->lport_num = pi->lport;
448
449 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
450
451 if (status)
452 return status;
453
 /* save off old link status information */
455 *li_old = *li;
456
457
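 /* update current link status information */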
458 li->link_speed = le16_to_cpu(link_data.link_speed);
459 li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
460 li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
461 *hw_media_type = ice_get_media_type(pi);
462 li->link_info = link_data.link_info;
463 li->link_cfg_err = link_data.link_cfg_err;
464 li->an_info = link_data.an_info;
465 li->ext_info = link_data.ext_info;
466 li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
467 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
468 li->topo_media_conflict = link_data.topo_media_conflict;
469 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
470 ICE_AQ_CFG_PACING_TYPE_M);
471
472
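 /* update fc info */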
473 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
474 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
475 if (tx_pause && rx_pause)
476 hw_fc_info->current_mode = ICE_FC_FULL;
477 else if (tx_pause)
478 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
479 else if (rx_pause)
480 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
481 else
482 hw_fc_info->current_mode = ICE_FC_NONE;
483
484 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
485
486 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
487 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
488 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
489 (unsigned long long)li->phy_type_low);
490 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
491 (unsigned long long)li->phy_type_high);
492 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
493 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
494 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
495 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
496 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
497 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
498 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
499 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
500 li->max_frame_size);
501 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
502
 /* save link status information */
504 if (link)
505 *link = *li;
506
 /* flag cleared so calling functions don't call AQ again */
508 pi->phy.get_link_info = false;
509
510 return 0;
511}
512
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
521static void
522ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
523 struct ice_aqc_set_mac_cfg *cmd)
524{
525 u16 fc_thres_val, tx_timer_val;
526 u32 val;
527
 /* We read back the transmit timer and FC threshold value of
  * LFC. Thus, we will use index =
  * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
  *
  * Also, because we are operating on transmit timer and FC
  * threshold of LFC, we don't turn on any bit in tx_tmr_priority
  */
535#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
536
537
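 /* Retrieve the transmit timer */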
538 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
539 tx_timer_val = val &
540 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
541 cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
542
543
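 /* Retrieve the FC threshold */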
544 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
545 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
546
547 cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
548}
549
/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
558int
559ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
560{
561 struct ice_aqc_set_mac_cfg *cmd;
562 struct ice_aq_desc desc;
563
564 cmd = &desc.params.set_mac_cfg;
565
566 if (max_frame_size == 0)
567 return -EINVAL;
568
569 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
570
571 cmd->max_frame_size = cpu_to_le16(max_frame_size);
572
573 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
574
575 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
576}
577
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
582static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
583{
584 struct ice_switch_info *sw;
585 int status;
586
587 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
588 sizeof(*hw->switch_info), GFP_KERNEL);
589 sw = hw->switch_info;
590
591 if (!sw)
592 return -ENOMEM;
593
594 INIT_LIST_HEAD(&sw->vsi_list_map_head);
595 sw->prof_res_bm_init = 0;
596
597 status = ice_init_def_sw_recp(hw);
598 if (status) {
599 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
600 return status;
601 }
602 return 0;
603}
604
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
609static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
610{
611 struct ice_switch_info *sw = hw->switch_info;
612 struct ice_vsi_list_map_info *v_pos_map;
613 struct ice_vsi_list_map_info *v_tmp_map;
614 struct ice_sw_recipe *recps;
615 u8 i;
616
617 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
618 list_entry) {
619 list_del(&v_pos_map->list_entry);
620 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
621 }
622 recps = sw->recp_list;
623 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
624 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
625
626 recps[i].root_rid = i;
627 list_for_each_entry_safe(rg_entry, tmprg_entry,
628 &recps[i].rg_list, l_entry) {
629 list_del(&rg_entry->l_entry);
630 devm_kfree(ice_hw_to_dev(hw), rg_entry);
631 }
632
633 if (recps[i].adv_rule) {
634 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
635 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
636
637 mutex_destroy(&recps[i].filt_rule_lock);
638 list_for_each_entry_safe(lst_itr, tmp_entry,
639 &recps[i].filt_rules,
640 list_entry) {
641 list_del(&lst_itr->list_entry);
642 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
643 devm_kfree(ice_hw_to_dev(hw), lst_itr);
644 }
645 } else {
646 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
647
648 mutex_destroy(&recps[i].filt_rule_lock);
649 list_for_each_entry_safe(lst_itr, tmp_entry,
650 &recps[i].filt_rules,
651 list_entry) {
652 list_del(&lst_itr->list_entry);
653 devm_kfree(ice_hw_to_dev(hw), lst_itr);
654 }
655 }
656 if (recps[i].root_buf)
657 devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
658 }
659 ice_rm_all_sw_replay_rule_info(hw);
660 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
661 devm_kfree(ice_hw_to_dev(hw), sw);
662}
663
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
668static int ice_get_fw_log_cfg(struct ice_hw *hw)
669{
670 struct ice_aq_desc desc;
671 __le16 *config;
672 int status;
673 u16 size;
674
675 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
676 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
677 if (!config)
678 return -ENOMEM;
679
680 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
681
682 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
683 if (!status) {
684 u16 i;
685
 /* Save FW logging information into the HW structure */
687 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
688 u16 v, m, flgs;
689
690 v = le16_to_cpu(config[i]);
691 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
692 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
693
694 if (m < ICE_AQC_FW_LOG_ID_MAX)
695 hw->fw_log.evnts[m].cur = flgs;
696 }
697 }
698
699 devm_kfree(ice_hw_to_dev(hw), config);
700
701 return status;
702}
703
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PF's. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers have
 * to leave the "cfg" elements alone.
 */
740static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
741{
742 struct ice_aqc_fw_logging *cmd;
743 u16 i, chgs = 0, len = 0;
744 struct ice_aq_desc desc;
745 __le16 *data = NULL;
746 u8 actv_evnts = 0;
747 void *buf = NULL;
748 int status = 0;
749
750 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
751 return 0;
752
 /* Disable FW logging only when the control queue is still responsive */
754 if (!enable &&
755 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
756 return 0;
757
 /* Get current FW log settings */
759 status = ice_get_fw_log_cfg(hw);
760 if (status)
761 return status;
762
763 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
764 cmd = &desc.params.fw_logging;
765
766
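 /* Indicate which controls are valid */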
767 if (hw->fw_log.cq_en)
768 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;
769
770 if (hw->fw_log.uart_en)
771 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;
772
773 if (enable) {
 /* Fill in an array of entries with FW logging modules and
  * logging events being reconfigured.
  */
777 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
778 u16 val;
779
 /* Keep track of enabled event types */
781 actv_evnts |= hw->fw_log.evnts[i].cfg;
782
783 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
784 continue;
785
786 if (!data) {
787 data = devm_kcalloc(ice_hw_to_dev(hw),
788 ICE_AQC_FW_LOG_ID_MAX,
789 sizeof(*data),
790 GFP_KERNEL);
791 if (!data)
792 return -ENOMEM;
793 }
794
795 val = i << ICE_AQC_FW_LOG_ID_S;
796 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
797 data[chgs++] = cpu_to_le16(val);
798 }
 /* Only enable FW logging if at least one module is specified.
  * If FW logging is currently enabled but all modules are not
  * enabled to emit log messages, disable FW logging altogether.
  */
804 if (actv_evnts) {
 /* Leave if there is effectively no change */
806 if (!chgs)
807 goto out;
808
809 if (hw->fw_log.cq_en)
810 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;
811
812 if (hw->fw_log.uart_en)
813 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
814
815 buf = data;
816 len = sizeof(*data) * chgs;
817 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
818 }
819 }
820
821 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
822 if (!status) {
 /* Update the current configuration to reflect events enabled.
  * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
  * logging mode is enabled for the device. They do not reflect
  * actual modules being enabled to emit log messages. So, their
  * values remain unchanged even when all modules are disabled.
  */
829 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;
830
831 hw->fw_log.actv_evnts = actv_evnts;
832 for (i = 0; i < cnt; i++) {
833 u16 v, m;
834
835 if (!enable) {
 /* When disabling all FW logging events as part
  * of device's de-initialization, the original
  * configurations are retained, and can be used
  * to reconfigure FW logging later if the device
  * is re-initialized.
  */
842 hw->fw_log.evnts[i].cur = 0;
843 continue;
844 }
845
846 v = le16_to_cpu(data[i]);
847 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
848 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
849 }
850 }
851
852out:
853 if (data)
854 devm_kfree(ice_hw_to_dev(hw), data);
855
856 return status;
857}
858
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
867void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
868{
869 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
870 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
871 le16_to_cpu(desc->datalen));
872 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
873}
874
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
882static void ice_get_itr_intrl_gran(struct ice_hw *hw)
883{
884 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
885 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
886 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
887
888 switch (max_agg_bw) {
889 case ICE_MAX_AGG_BW_200G:
890 case ICE_MAX_AGG_BW_100G:
891 case ICE_MAX_AGG_BW_50G:
892 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
893 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
894 break;
895 case ICE_MAX_AGG_BW_25G:
896 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
897 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
898 break;
899 }
900}
901
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
906int ice_init_hw(struct ice_hw *hw)
907{
908 struct ice_aqc_get_phy_caps_data *pcaps;
909 u16 mac_buf_len;
910 void *mac_buf;
911 int status;
912
913
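 /* Set MAC type based on DeviceID */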
914 status = ice_set_mac_type(hw);
915 if (status)
916 return status;
917
918 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
919 PF_FUNC_RID_FUNC_NUM_M) >>
920 PF_FUNC_RID_FUNC_NUM_S;
921
922 status = ice_reset(hw, ICE_RESET_PFR);
923 if (status)
924 return status;
925
926 ice_get_itr_intrl_gran(hw);
927
928 status = ice_create_all_ctrlq(hw);
929 if (status)
930 goto err_unroll_cqinit;
931
932
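 /* Enable FW logging. Not fatal if this fails. */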
933 status = ice_cfg_fw_log(hw, true);
934 if (status)
935 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");
936
937 status = ice_clear_pf_cfg(hw);
938 if (status)
939 goto err_unroll_cqinit;
940
941
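 /* Set bit to enable Flow Director filters */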
942 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
943 INIT_LIST_HEAD(&hw->fdir_list_head);
944
945 ice_clear_pxe_mode(hw);
946
947 status = ice_init_nvm(hw);
948 if (status)
949 goto err_unroll_cqinit;
950
951 status = ice_get_caps(hw);
952 if (status)
953 goto err_unroll_cqinit;
954
955 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
956 sizeof(*hw->port_info), GFP_KERNEL);
957 if (!hw->port_info) {
958 status = -ENOMEM;
959 goto err_unroll_cqinit;
960 }
961
 /* set the back pointer to HW */
963 hw->port_info->hw = hw;
964
 /* Initialize port_info struct with switch configuration data */
966 status = ice_get_initial_sw_cfg(hw);
967 if (status)
968 goto err_unroll_alloc;
969
970 hw->evb_veb = true;
971
 /* Query the allocated resources for Tx scheduler */
973 status = ice_sched_query_res_alloc(hw);
974 if (status) {
975 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
976 goto err_unroll_alloc;
977 }
978 ice_sched_get_psm_clk_freq(hw);
979
 /* Initialize port_info struct with scheduler data */
981 status = ice_sched_init_port(hw->port_info);
982 if (status)
983 goto err_unroll_sched;
984
985 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
986 if (!pcaps) {
987 status = -ENOMEM;
988 goto err_unroll_sched;
989 }
990
 /* Initialize port_info struct with PHY capabilities */
992 status = ice_aq_get_phy_caps(hw->port_info, false,
993 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
994 NULL);
995 devm_kfree(ice_hw_to_dev(hw), pcaps);
996 if (status)
997 dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
998 status);
999
 /* Initialize port_info struct with link information */
1001 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1002 if (status)
1003 goto err_unroll_sched;
1004
 /* need a valid SW entry point to build a Tx tree */
1006 if (!hw->sw_entry_point_layer) {
1007 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1008 status = -EIO;
1009 goto err_unroll_sched;
1010 }
1011 INIT_LIST_HEAD(&hw->agg_list);
 /* Initialize max burst size */
1013 if (!hw->max_burst_size)
1014 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1015
1016 status = ice_init_fltr_mgmt_struct(hw);
1017 if (status)
1018 goto err_unroll_sched;
1019
 /* Get MAC information */
 /* A single port can report up to two (LAN and WoL) addresses */
1022 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
1023 sizeof(struct ice_aqc_manage_mac_read_resp),
1024 GFP_KERNEL);
1025 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1026
1027 if (!mac_buf) {
1028 status = -ENOMEM;
1029 goto err_unroll_fltr_mgmt_struct;
1030 }
1031
1032 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1033 devm_kfree(ice_hw_to_dev(hw), mac_buf);
1034
1035 if (status)
1036 goto err_unroll_fltr_mgmt_struct;
1037
1038 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
1039 if (status)
1040 goto err_unroll_fltr_mgmt_struct;
1041
1042 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
1043 if (status)
1044 goto err_unroll_fltr_mgmt_struct;
1045 status = ice_init_hw_tbls(hw);
1046 if (status)
1047 goto err_unroll_fltr_mgmt_struct;
1048 mutex_init(&hw->tnl_lock);
1049 return 0;
1050
1051err_unroll_fltr_mgmt_struct:
1052 ice_cleanup_fltr_mgmt_struct(hw);
1053err_unroll_sched:
1054 ice_sched_cleanup_all(hw);
1055err_unroll_alloc:
1056 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1057err_unroll_cqinit:
1058 ice_destroy_all_ctrlq(hw);
1059 return status;
1060}
1061
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */
1070void ice_deinit_hw(struct ice_hw *hw)
1071{
1072 ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
1073 ice_cleanup_fltr_mgmt_struct(hw);
1074
1075 ice_sched_cleanup_all(hw);
1076 ice_sched_clear_agg(hw);
1077 ice_free_seg(hw);
1078 ice_free_hw_tbls(hw);
1079 mutex_destroy(&hw->tnl_lock);
1080
1081 if (hw->port_info) {
1082 devm_kfree(ice_hw_to_dev(hw), hw->port_info);
1083 hw->port_info = NULL;
1084 }
1085
 /* Attempt to disable FW logging before shutting down control queues */
1087 ice_cfg_fw_log(hw, false);
1088 ice_destroy_all_ctrlq(hw);
1089
 /* Clear VSI contexts if not already cleared */
1091 ice_clear_all_vsi_ctx(hw);
1092}
1093
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
1098int ice_check_reset(struct ice_hw *hw)
1099{
1100 u32 cnt, reg = 0, grst_timeout, uld_mask;
1101
 /* Poll for Device Active state in case a recent CORER, GLOBR,
  * or EMPR has occurred. The grst delay value is in 100ms units.
  * Add 1sec for outstanding Admin Queue commands that can take a
  * long time.
  */
1106 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1107 GLGEN_RSTCTL_GRSTDEL_S) + 10;
1108
1109 for (cnt = 0; cnt < grst_timeout; cnt++) {
1110 mdelay(100);
1111 reg = rd32(hw, GLGEN_RSTAT);
1112 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1113 break;
1114 }
1115
1116 if (cnt == grst_timeout) {
1117 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1118 return -EIO;
1119 }
1120
1121#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1122 GLNVM_ULD_PCIER_DONE_1_M |\
1123 GLNVM_ULD_CORER_DONE_M |\
1124 GLNVM_ULD_GLOBR_DONE_M |\
1125 GLNVM_ULD_POR_DONE_M |\
1126 GLNVM_ULD_POR_DONE_1_M |\
1127 GLNVM_ULD_PCIER_DONE_2_M)
1128
1129 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
1130 GLNVM_ULD_PE_DONE_M : 0);
1131
1132
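 /* Device is Active; check Global Reset processes are done */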
1133 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
1134 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1135 if (reg == uld_mask) {
1136 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1137 break;
1138 }
1139 mdelay(10);
1140 }
1141
1142 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1143 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1144 reg);
1145 return -EIO;
1146 }
1147
1148 return 0;
1149}
1150
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
1158static int ice_pf_reset(struct ice_hw *hw)
1159{
1160 u32 cnt, reg;
1161
 /* If at function entry a global reset was already in progress, i.e.
  * state is not 'device active' or any of the reset done bits are not
  * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
  * global reset is done.
  */
1167 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1168 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
 /* poll on global reset currently in progress until done */
1170 if (ice_check_reset(hw))
1171 return -EIO;
1172
1173 return 0;
1174 }
1175
 /* Reset the PF */
1177 reg = rd32(hw, PFGEN_CTRL);
1178
1179 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1180
 /* Wait for the PFR to complete. The wait time is the global config lock
  * timeout plus the PFR timeout which will account for a possible reset
  * that is occurring during a download package operation.
  */
1185 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1186 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1187 reg = rd32(hw, PFGEN_CTRL);
1188 if (!(reg & PFGEN_CTRL_PFSWR_M))
1189 break;
1190
1191 mdelay(1);
1192 }
1193
1194 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1195 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1196 return -EIO;
1197 }
1198
1199 return 0;
1200}
1201
/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
1214int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1215{
1216 u32 val = 0;
1217
1218 switch (req) {
1219 case ICE_RESET_PFR:
1220 return ice_pf_reset(hw);
1221 case ICE_RESET_CORER:
1222 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1223 val = GLGEN_RTRIG_CORER_M;
1224 break;
1225 case ICE_RESET_GLOBR:
1226 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1227 val = GLGEN_RTRIG_GLOBR_M;
1228 break;
1229 default:
1230 return -EINVAL;
1231 }
1232
1233 val |= rd32(hw, GLGEN_RTRIG);
1234 wr32(hw, GLGEN_RTRIG, val);
1235 ice_flush(hw);
1236
1237
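 /* wait for the FW to be ready */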
1238 return ice_check_reset(hw);
1239}
1240
/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
1249static int
1250ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1251{
1252 u8 i;
1253
1254 if (!ice_rxq_ctx)
1255 return -EINVAL;
1256
1257 if (rxq_index > QRX_CTRL_MAX_INDEX)
1258 return -EINVAL;
1259
1260
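 /* Copy each dword separately to HW */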
1261 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1262 wr32(hw, QRX_CONTEXT(i, rxq_index),
1263 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1264
1265 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1266 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1267 }
1268
1269 return 0;
1270}
1271
1272
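/* LAN Rx Queue Context */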
1273static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
 /* Field  Width  LSB */
1275 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1276 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1277 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1278 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1279 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1280 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1281 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1282 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1283 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1284 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1285 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1286 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1287 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1288 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1289 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1290 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1291 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1292 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1293 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1294 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1295 { 0 }
1296};
1297
/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
1308int
1309ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1310 u32 rxq_index)
1311{
1312 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1313
1314 if (!rlan_ctx)
1315 return -EINVAL;
1316
1317 rlan_ctx->prefena = 1;
1318
1319 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1320 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1321}
1322
1323
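/* LAN Tx Queue Context */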
1324const struct ice_ctx_ele ice_tlan_ctx_info[] = {
 /* Field  Width  LSB */
1326 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1327 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1328 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1329 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1330 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1331 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1332 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1333 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1334 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1335 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1336 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1337 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1338 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1339 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1340 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1341 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1342 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1343 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1344 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1345 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1346 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1347 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1348 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1349 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1350 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1351 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1352 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1353 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1354 { 0 }
1355};
1356
/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
1367static int
1368ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1369 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1370{
1371 return ice_sq_send_cmd(hw, ice_get_sbq(hw),
1372 (struct ice_aq_desc *)desc, buf, buf_size, cd);
1373}
1374
/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
1380int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
1381{
1382 struct ice_sbq_cmd_desc desc = {0};
1383 struct ice_sbq_msg_req msg = {0};
1384 u16 msg_len;
1385 int status;
1386
1387 msg_len = sizeof(msg);
1388
1389 msg.dest_dev = in->dest_dev;
1390 msg.opcode = in->opcode;
1391 msg.flags = ICE_SBQ_MSG_FLAGS;
1392 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1393 msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
1394 msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
1395
1396 if (in->opcode)
1397 msg.data = cpu_to_le32(in->data);
1398 else
 /* data read comes back in the completion, so shorten the
  * request and response buffer length accordingly
  */
1402 msg_len -= sizeof(msg.data);
1403
1404 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
1405 desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
1406 desc.param0.cmd_len = cpu_to_le16(msg_len);
1407 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1408 if (!status && !in->opcode)
1409 in->data = le32_to_cpu
1410 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1411 return status;
1412}
1413
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
1420DEFINE_MUTEX(ice_global_cfg_lock_sw);
1421
/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
1429static bool ice_should_retry_sq_send_cmd(u16 opcode)
1430{
1431 switch (opcode) {
1432 case ice_aqc_opc_get_link_topo:
1433 case ice_aqc_opc_lldp_stop:
1434 case ice_aqc_opc_lldp_start:
1435 case ice_aqc_opc_lldp_filter_ctrl:
1436 return true;
1437 }
1438
1439 return false;
1440}
1441
/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
1454static int
1455ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1456 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1457 struct ice_sq_cd *cd)
1458{
1459 struct ice_aq_desc desc_cpy;
1460 bool is_cmd_for_retry;
1461 u8 *buf_cpy = NULL;
1462 u8 idx = 0;
1463 u16 opcode;
1464 int status;
1465
1466 opcode = le16_to_cpu(desc->opcode);
1467 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1468 memset(&desc_cpy, 0, sizeof(desc_cpy));
1469
1470 if (is_cmd_for_retry) {
1471 if (buf) {
1472 buf_cpy = kzalloc(buf_size, GFP_KERNEL);
1473 if (!buf_cpy)
1474 return -ENOMEM;
1475 }
1476
1477 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
1478 }
1479
1480 do {
1481 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1482
1483 if (!is_cmd_for_retry || !status ||
1484 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1485 break;
1486
1487 if (buf_cpy)
1488 memcpy(buf, buf_cpy, buf_size);
1489
1490 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
1491
1492 mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
1493
1494 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1495
1496 kfree(buf_cpy);
1497
1498 return status;
1499}
1500
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
1511int
1512ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1513 u16 buf_size, struct ice_sq_cd *cd)
1514{
1515 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1516 bool lock_acquired = false;
1517 int status;
1518
 /* When a package download is in process (i.e. when the firmware's
  * Global Configuration Lock resource is held), only the Download
  * Package, Get Version, Get Package Info List, and Release Resource
  * (with resource ID set to Global Config Lock) AdminQ commands are
  * allowed; all others must block until the package download completes
  * and the Global Config Lock is released. See also
  * ice_acquire_global_cfg_lock().
  */
1527 switch (le16_to_cpu(desc->opcode)) {
1528 case ice_aqc_opc_download_pkg:
1529 case ice_aqc_opc_get_pkg_info_list:
1530 case ice_aqc_opc_get_ver:
1531 break;
1532 case ice_aqc_opc_release_res:
1533 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1534 break;
1535 fallthrough;
1536 default:
1537 mutex_lock(&ice_global_cfg_lock_sw);
1538 lock_acquired = true;
1539 break;
1540 }
1541
1542 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1543 if (lock_acquired)
1544 mutex_unlock(&ice_global_cfg_lock_sw);
1545
1546 return status;
1547}
1548
/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
1556int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1557{
1558 struct ice_aqc_get_ver *resp;
1559 struct ice_aq_desc desc;
1560 int status;
1561
1562 resp = &desc.params.get_ver;
1563
1564 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1565
1566 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1567
1568 if (!status) {
1569 hw->fw_branch = resp->fw_branch;
1570 hw->fw_maj_ver = resp->fw_major;
1571 hw->fw_min_ver = resp->fw_minor;
1572 hw->fw_patch = resp->fw_patch;
1573 hw->fw_build = le32_to_cpu(resp->fw_build);
1574 hw->api_branch = resp->api_branch;
1575 hw->api_maj_ver = resp->api_major;
1576 hw->api_min_ver = resp->api_minor;
1577 hw->api_patch = resp->api_patch;
1578 }
1579
1580 return status;
1581}
1582
/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
1591int
1592ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1593 struct ice_sq_cd *cd)
1594{
1595 struct ice_aqc_driver_ver *cmd;
1596 struct ice_aq_desc desc;
1597 u16 len;
1598
1599 cmd = &desc.params.driver_ver;
1600
1601 if (!dv)
1602 return -EINVAL;
1603
1604 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1605
1606 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1607 cmd->major_ver = dv->major_ver;
1608 cmd->minor_ver = dv->minor_ver;
1609 cmd->build_ver = dv->build_ver;
1610 cmd->subbuild_ver = dv->subbuild_ver;
1611
1612 len = 0;
1613 while (len < sizeof(dv->driver_string) &&
1614 isascii(dv->driver_string[len]) && dv->driver_string[len])
1615 len++;
1616
1617 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1618}
1619
/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
1628int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1629{
1630 struct ice_aqc_q_shutdown *cmd;
1631 struct ice_aq_desc desc;
1632
1633 cmd = &desc.params.q_shutdown;
1634
1635 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1636
1637 if (unloading)
1638 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1639
1640 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1641}
1642
/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) 0         - acquired lock, and can perform download package
 *  2) -EIO      - did not get lock, driver should fail to load
 *  3) -EALREADY - did not get lock, but another driver has
 *                 successfully downloaded the package; the driver does
 *                 not have to download the package and can continue
 *                 loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and
 * issue a CORER. In this case, the driver will receive a CORER interrupt and
 * will have to determine its cause. The calling thread that is handling this
 * flow will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
1669static int
1670ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1671 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1672 struct ice_sq_cd *cd)
1673{
1674 struct ice_aqc_req_res *cmd_resp;
1675 struct ice_aq_desc desc;
1676 int status;
1677
1678 cmd_resp = &desc.params.res_owner;
1679
1680 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1681
1682 cmd_resp->res_id = cpu_to_le16(res);
1683 cmd_resp->access_type = cpu_to_le16(access);
1684 cmd_resp->res_number = cpu_to_le32(sdp_number);
1685 cmd_resp->timeout = cpu_to_le32(*timeout);
1686 *timeout = 0;
1687
1688 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1689
 /* The completion specifies the maximum time in ms that the driver
  * may hold the resource in the Timeout field.
  *
  * Global config lock response utilizes an additional status field.
  *
  * If the Global config lock resource is held by some other driver, the
  * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
  * and the timeout field indicates the maximum time the current owner
  * of the resource has to free it.
  */
1701 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1702 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1703 *timeout = le32_to_cpu(cmd_resp->timeout);
1704 return 0;
1705 } else if (le16_to_cpu(cmd_resp->status) ==
1706 ICE_AQ_RES_GLBL_IN_PROG) {
1707 *timeout = le32_to_cpu(cmd_resp->timeout);
1708 return -EIO;
1709 } else if (le16_to_cpu(cmd_resp->status) ==
1710 ICE_AQ_RES_GLBL_DONE) {
1711 return -EALREADY;
1712 }
1713
 /* invalid FW response, force a timeout immediately */
1715 *timeout = 0;
1716 return -EIO;
1717 }
1718
 /* If the resource is held by some other driver, the command completes
  * with a busy return value and the timeout field indicates the maximum
  * time the current owner of the resource has to free it.
  */
1723 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1724 *timeout = le32_to_cpu(cmd_resp->timeout);
1725
1726 return status;
1727}
1728
/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
1738static int
1739ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1740 struct ice_sq_cd *cd)
1741{
1742 struct ice_aqc_req_res *cmd;
1743 struct ice_aq_desc desc;
1744
1745 cmd = &desc.params.res_owner;
1746
1747 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1748
1749 cmd->res_id = cpu_to_le16(res);
1750 cmd->res_number = cpu_to_le32(sdp_number);
1751
1752 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1753}
1754
/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
1764int
1765ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1766 enum ice_aq_res_access_type access, u32 timeout)
1767{
1768#define ICE_RES_POLLING_DELAY_MS 10
1769 u32 delay = ICE_RES_POLLING_DELAY_MS;
1770 u32 time_left = timeout;
1771 int status;
1772
1773 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1774
 /* A return code of -EALREADY means that another driver has
  * previously acquired the resource and performed any necessary updates;
  * in this case the caller does not obtain the resource and has no
  * further work to do, in which case the driver will exit with no error
  */
1780 if (status == -EALREADY)
1781 goto ice_acquire_res_exit;
1782
1783 if (status)
1784 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1785
1786
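 /* If necessary, poll until the current lock owner timeouts */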
1787 timeout = time_left;
1788 while (status && timeout && time_left) {
1789 mdelay(delay);
1790 timeout = (timeout > delay) ? timeout - delay : 0;
1791 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1792
1793 if (status == -EALREADY)
 /* lock free, but no work to do */
1795 break;
1796
1797 if (!status)
 /* lock acquired */
1799 break;
1800 }
1801 if (status && status != -EALREADY)
1802 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1803
1804ice_acquire_res_exit:
1805 if (status == -EALREADY) {
1806 if (access == ICE_RES_WRITE)
1807 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1808 else
1809 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
1810 }
1811 return status;
1812}
1813
/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
1821void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1822{
1823 u32 total_delay = 0;
1824 int status;
1825
1826 status = ice_aq_release_res(hw, res, 0, NULL);
1827
 /* there are some rare cases when trying to release the resource
  * results in an admin queue timeout, so handle them correctly
  */
1831 while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) {
1832 mdelay(1);
1833 status = ice_aq_release_res(hw, res, 0, NULL);
1834 total_delay++;
1835 }
1836}
1837
/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
1849int
1850ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1851 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1852 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1853{
1854 struct ice_aqc_alloc_free_res_cmd *cmd;
1855 struct ice_aq_desc desc;
1856
1857 cmd = &desc.params.sw_res_ctrl;
1858
1859 if (!buf)
1860 return -EINVAL;
1861
1862 if (buf_size < flex_array_size(buf, elem, num_entries))
1863 return -EINVAL;
1864
1865 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1866
1867 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1868
1869 cmd->num_entries = cpu_to_le16(num_entries);
1870
1871 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1872}
1873
/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
1882int
1883ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1884{
1885 struct ice_aqc_alloc_free_res_elem *buf;
1886 u16 buf_len;
1887 int status;
1888
1889 buf_len = struct_size(buf, elem, num);
1890 buf = kzalloc(buf_len, GFP_KERNEL);
1891 if (!buf)
1892 return -ENOMEM;
1893
1894
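 /* Prepare buffer to allocate resource */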
1895 buf->num_elems = cpu_to_le16(num);
1896 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1897 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1898 if (btm)
1899 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1900
1901 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1902 ice_aqc_opc_alloc_res, NULL);
1903 if (status)
1904 goto ice_alloc_res_exit;
1905
1906 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1907
1908ice_alloc_res_exit:
1909 kfree(buf);
1910 return status;
1911}
1912
/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
1920int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1921{
1922 struct ice_aqc_alloc_free_res_elem *buf;
1923 u16 buf_len;
1924 int status;
1925
1926 buf_len = struct_size(buf, elem, num);
1927 buf = kzalloc(buf_len, GFP_KERNEL);
1928 if (!buf)
1929 return -ENOMEM;
1930
1931
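 /* Prepare buffer to free resource */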
1932 buf->num_elems = cpu_to_le16(num);
1933 buf->res_type = cpu_to_le16(type);
1934 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1935
1936 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1937 ice_aqc_opc_free_res, NULL);
1938 if (status)
1939 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1940
1941 kfree(buf);
1942 return status;
1943}
1944
/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
1954static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1955{
1956 u8 funcs;
1957
1958#define ICE_CAPS_VALID_FUNCS_M 0xFF
1959 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1960 ICE_CAPS_VALID_FUNCS_M);
1961
1962 if (!funcs)
1963 return 0;
1964
1965 return max / funcs;
1966}
1967
/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
1981static bool
1982ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1983 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1984{
1985 u32 logical_id = le32_to_cpu(elem->logical_id);
1986 u32 phys_id = le32_to_cpu(elem->phys_id);
1987 u32 number = le32_to_cpu(elem->number);
1988 u16 cap = le16_to_cpu(elem->cap);
1989 bool found = true;
1990
1991 switch (cap) {
1992 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1993 caps->valid_functions = number;
1994 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1995 caps->valid_functions);
1996 break;
1997 case ICE_AQC_CAPS_SRIOV:
1998 caps->sr_iov_1_1 = (number == 1);
1999 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
2000 caps->sr_iov_1_1);
2001 break;
2002 case ICE_AQC_CAPS_DCB:
2003 caps->dcb = (number == 1);
2004 caps->active_tc_bitmap = logical_id;
2005 caps->maxtc = phys_id;
2006 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
2007 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
2008 caps->active_tc_bitmap);
2009 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
2010 break;
2011 case ICE_AQC_CAPS_RSS:
2012 caps->rss_table_size = number;
2013 caps->rss_table_entry_width = logical_id;
2014 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
2015 caps->rss_table_size);
2016 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
2017 caps->rss_table_entry_width);
2018 break;
2019 case ICE_AQC_CAPS_RXQS:
2020 caps->num_rxq = number;
2021 caps->rxq_first_id = phys_id;
2022 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
2023 caps->num_rxq);
2024 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
2025 caps->rxq_first_id);
2026 break;
2027 case ICE_AQC_CAPS_TXQS:
2028 caps->num_txq = number;
2029 caps->txq_first_id = phys_id;
2030 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
2031 caps->num_txq);
2032 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
2033 caps->txq_first_id);
2034 break;
2035 case ICE_AQC_CAPS_MSIX:
2036 caps->num_msix_vectors = number;
2037 caps->msix_vector_first_id = phys_id;
2038 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
2039 caps->num_msix_vectors);
2040 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
2041 caps->msix_vector_first_id);
2042 break;
2043 case ICE_AQC_CAPS_PENDING_NVM_VER:
2044 caps->nvm_update_pending_nvm = true;
2045 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
2046 break;
2047 case ICE_AQC_CAPS_PENDING_OROM_VER:
2048 caps->nvm_update_pending_orom = true;
2049 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
2050 break;
2051 case ICE_AQC_CAPS_PENDING_NET_VER:
2052 caps->nvm_update_pending_netlist = true;
2053 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
2054 break;
2055 case ICE_AQC_CAPS_NVM_MGMT:
2056 caps->nvm_unified_update =
2057 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2058 true : false;
2059 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2060 caps->nvm_unified_update);
2061 break;
2062 case ICE_AQC_CAPS_RDMA:
2063 caps->rdma = (number == 1);
2064 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
2065 break;
2066 case ICE_AQC_CAPS_MAX_MTU:
2067 caps->max_mtu = number;
2068 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2069 prefix, caps->max_mtu);
2070 break;
2071 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2072 caps->pcie_reset_avoidance = (number > 0);
2073 ice_debug(hw, ICE_DBG_INIT,
2074 "%s: pcie_reset_avoidance = %d\n", prefix,
2075 caps->pcie_reset_avoidance);
2076 break;
2077 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2078 caps->reset_restrict_support = (number == 1);
2079 ice_debug(hw, ICE_DBG_INIT,
2080 "%s: reset_restrict_support = %d\n", prefix,
2081 caps->reset_restrict_support);
2082 break;
2083 default:
 /* Not one of the recognized common capabilities */
2085 found = false;
2086 }
2087
2088 return found;
2089}
2090
/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
2100static void
2101ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2102{
 /* This assumes device capabilities are always scanned before function
  * capabilities during the initialization flow.
  */
2106 if (hw->dev_caps.num_funcs > 4) {
 /* Max 4 TCs per port */
2108 caps->maxtc = 4;
2109 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2110 caps->maxtc);
2111 if (caps->rdma) {
2112 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2113 caps->rdma = 0;
2114 }
2115
 /* print message only when processing device capabilities
  * during initialization.
  */
2119 if (caps == &hw->dev_caps.common_cap)
2120 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
2121 }
2122}
2123
/**
 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VF.
 */
2132static void
2133ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2134 struct ice_aqc_list_caps_elem *cap)
2135{
2136 u32 logical_id = le32_to_cpu(cap->logical_id);
2137 u32 number = le32_to_cpu(cap->number);
2138
2139 func_p->num_allocd_vfs = number;
2140 func_p->vf_base_id = logical_id;
2141 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2142 func_p->num_allocd_vfs);
2143 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2144 func_p->vf_base_id);
2145}
2146
/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
2155static void
2156ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2157 struct ice_aqc_list_caps_elem *cap)
2158{
2159 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2160 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2161 le32_to_cpu(cap->number));
2162 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2163 func_p->guar_num_vsi);
2164}
2165
/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
2174static void
2175ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2176 struct ice_aqc_list_caps_elem *cap)
2177{
2178 struct ice_ts_func_info *info = &func_p->ts_func_info;
2179 u32 number = le32_to_cpu(cap->number);
2180
2181 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
2182 func_p->common_cap.ieee_1588 = info->ena;
2183
2184 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
2185 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
2186 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
2187 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
2188
2189 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
2190 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
2191
2192 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
2193 info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
2194 } else {
 /* Unknown clock frequency, so assume a (probably incorrect)
  * default to avoid out-of-bounds look ups of frequency
  * related information.
  */
2199 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2200 info->clk_freq);
2201 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2202 }
2203
2204 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2205 func_p->common_cap.ieee_1588);
2206 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2207 info->src_tmr_owned);
2208 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2209 info->tmr_ena);
2210 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2211 info->tmr_index_owned);
2212 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2213 info->tmr_index_assoc);
2214 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2215 info->clk_freq);
2216 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2217 info->clk_src);
2218}
2219
/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
2227static void
2228ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2229{
2230 u32 reg_val, val;
2231
2232 reg_val = rd32(hw, GLQF_FD_SIZE);
2233 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2234 GLQF_FD_SIZE_FD_GSIZE_S;
2235 func_p->fd_fltr_guar =
2236 ice_get_num_per_func(hw, val);
2237 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2238 GLQF_FD_SIZE_FD_BSIZE_S;
2239 func_p->fd_fltr_best_effort = val;
2240
2241 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2242 func_p->fd_fltr_guar);
2243 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2244 func_p->fd_fltr_best_effort);
2245}
2246
/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
2261static void
2262ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2263 void *buf, u32 cap_count)
2264{
2265 struct ice_aqc_list_caps_elem *cap_resp;
2266 u32 i;
2267
2268 cap_resp = buf;
2269
2270 memset(func_p, 0, sizeof(*func_p));
2271
2272 for (i = 0; i < cap_count; i++) {
2273 u16 cap = le16_to_cpu(cap_resp[i].cap);
2274 bool found;
2275
2276 found = ice_parse_common_caps(hw, &func_p->common_cap,
2277 &cap_resp[i], "func caps");
2278
2279 switch (cap) {
2280 case ICE_AQC_CAPS_VF:
2281 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2282 break;
2283 case ICE_AQC_CAPS_VSI:
2284 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2285 break;
2286 case ICE_AQC_CAPS_1588:
2287 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
2288 break;
2289 case ICE_AQC_CAPS_FD:
2290 ice_parse_fdir_func_caps(hw, func_p);
2291 break;
2292 default:
 /* Don't list common capabilities as unknown */
2294 if (!found)
2295 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2296 i, cap);
2297 break;
2298 }
2299 }
2300
2301 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2302}
2303
/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
 */
2312static void
2313ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2314 struct ice_aqc_list_caps_elem *cap)
2315{
2316 u32 number = le32_to_cpu(cap->number);
2317
2318 dev_p->num_funcs = hweight32(number);
2319 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2320 dev_p->num_funcs);
2321}
2322
/**
 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VF for device capabilities.
 */
2331static void
2332ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2333 struct ice_aqc_list_caps_elem *cap)
2334{
2335 u32 number = le32_to_cpu(cap->number);
2336
2337 dev_p->num_vfs_exposed = number;
2338 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2339 dev_p->num_vfs_exposed);
2340}
2341
/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
2350static void
2351ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2352 struct ice_aqc_list_caps_elem *cap)
2353{
2354 u32 number = le32_to_cpu(cap->number);
2355
2356 dev_p->num_vsi_allocd_to_host = number;
2357 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2358 dev_p->num_vsi_allocd_to_host);
2359}
2360
/**
 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_1588 for device capabilities.
 */
2369static void
2370ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2371 struct ice_aqc_list_caps_elem *cap)
2372{
2373 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2374 u32 logical_id = le32_to_cpu(cap->logical_id);
2375 u32 phys_id = le32_to_cpu(cap->phys_id);
2376 u32 number = le32_to_cpu(cap->number);
2377
2378 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2379 dev_p->common_cap.ieee_1588 = info->ena;
2380
2381 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2382 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2383 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2384
2385 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2386 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2387 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2388
2389 info->ena_ports = logical_id;
2390 info->tmr_own_map = phys_id;
2391
2392 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2393 dev_p->common_cap.ieee_1588);
2394 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2395 info->tmr0_owner);
2396 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2397 info->tmr0_owned);
2398 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2399 info->tmr0_ena);
2400 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2401 info->tmr1_owner);
2402 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2403 info->tmr1_owned);
2404 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2405 info->tmr1_ena);
2406 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2407 info->ena_ports);
2408 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2409 info->tmr_own_map);
2410}
2411
/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
2420static void
2421ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2422 struct ice_aqc_list_caps_elem *cap)
2423{
2424 u32 number = le32_to_cpu(cap->number);
2425
2426 dev_p->num_flow_director_fltr = number;
2427 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2428 dev_p->num_flow_director_fltr);
2429}
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445static void
2446ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2447 void *buf, u32 cap_count)
2448{
2449 struct ice_aqc_list_caps_elem *cap_resp;
2450 u32 i;
2451
2452 cap_resp = buf;
2453
2454 memset(dev_p, 0, sizeof(*dev_p));
2455
2456 for (i = 0; i < cap_count; i++) {
2457 u16 cap = le16_to_cpu(cap_resp[i].cap);
2458 bool found;
2459
2460 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2461 &cap_resp[i], "dev caps");
2462
2463 switch (cap) {
2464 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2465 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2466 break;
2467 case ICE_AQC_CAPS_VF:
2468 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2469 break;
2470 case ICE_AQC_CAPS_VSI:
2471 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2472 break;
2473 case ICE_AQC_CAPS_1588:
2474 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
2475 break;
2476 case ICE_AQC_CAPS_FD:
2477 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
2478 break;
2479 default:
2480
2481 if (!found)
2482 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2483 i, cap);
2484 break;
2485 }
2486 }
2487
2488 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2489}
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510int
2511ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2512 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2513{
2514 struct ice_aqc_list_caps *cmd;
2515 struct ice_aq_desc desc;
2516 int status;
2517
2518 cmd = &desc.params.get_cap;
2519
2520 if (opc != ice_aqc_opc_list_func_caps &&
2521 opc != ice_aqc_opc_list_dev_caps)
2522 return -EINVAL;
2523
2524 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2525 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2526
2527 if (cap_count)
2528 *cap_count = le32_to_cpu(cmd->count);
2529
2530 return status;
2531}
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541int
2542ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2543{
2544 u32 cap_count = 0;
2545 void *cbuf;
2546 int status;
2547
2548 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2549 if (!cbuf)
2550 return -ENOMEM;
2551
2552
2553
2554
2555
2556 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2557
2558 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2559 ice_aqc_opc_list_dev_caps, NULL);
2560 if (!status)
2561 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2562 kfree(cbuf);
2563
2564 return status;
2565}
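
/*
 * Usage sketch (illustrative only, not driver code): the discovery helper
 * above is normally driven from ice_get_caps() below, but a caller holding
 * an initialized ice_hw could query device capabilities directly:
 *
 *	struct ice_hw_dev_caps caps;
 *
 *	if (!ice_discover_dev_caps(hw, &caps))
 *		dev_info(ice_hw_to_dev(hw), "%u PFs, %u VSIs for host\n",
 *			 caps.num_funcs, caps.num_vsi_allocd_to_host);
 */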

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps
 * structure for later use.
 */
static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	u32 cap_count = 0;
	void *cbuf;
	int status;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the HW struct
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
int ice_get_caps(struct ice_hw *hw)
{
	int status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}
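
/*
 * Illustrative init flow (a sketch; the surrounding conditions are
 * assumptions, not code from this file): capabilities are read once, and
 * if the DDP package cannot be loaded the same structures are rewritten
 * for safe mode:
 *
 *	if (ice_get_caps(hw))
 *		goto err_init;
 *	if (ddp_load_failed)
 *		ice_set_safe_mode_caps(hw);
 */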

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ether_addr_copy(cmd->mac_addr, mac_addr);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in the PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed. Exactly
 * one bit should be set across the pair; if no bit is set, or if a bit is
 * set in both halves, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Turn on the bits in [phy_type_low, phy_type_high] whose PHY types
 * correspond to a speed present in link_speeds_bitmap. The bitmap uses the
 * ICE_AQ_LINK_SPEED_* encoding and may contain multiple speeds.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}
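
/*
 * Minimal sketch of the conversion above (illustrative values): request
 * 10G and 25G and collect the matching PHY type bits:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *
 * phy_low now has every 10G/25G PHY type bit set; phy_high is untouched
 * because only 100G-class PHY types live in the high half.
 */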

/**
 * ice_aq_set_phy_cfg - set PHY configuration
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port
 * (0x0601). One or more of the Set PHY config parameters may be ignored in
 * an MFP mode as the PF may not have the privilege to set some of them.
 */
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	int status;

	if (!cfg)
		return -EINVAL;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
int ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	int status;

	if (!pi)
		return -EINVAL;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return -ENOMEM;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}
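
/*
 * Worked example (illustrative): a PHY reporting Tx+Rx pause plus
 * auto-FEC maps as follows:
 *
 *	u8 caps = ICE_AQC_PHY_EN_TX_LINK_PAUSE |
 *		  ICE_AQC_PHY_EN_RX_LINK_PAUSE |
 *		  ICE_AQC_PHY_EN_AUTO_FEC;
 *
 *	ice_caps_to_fc_mode(caps);	returns ICE_FC_FULL
 *	ice_caps_to_fec_mode(caps, 0);	returns ICE_FEC_AUTO
 */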

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return -EINVAL;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info. It sometimes takes a really long
		 * time for link to come back from the atomic reset, so
		 * retry with a short delay between attempts.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
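
/*
 * Illustrative caller (an ethtool-style pause configuration path; the
 * surrounding names are assumptions, only pi->fc.req_mode and the helper
 * itself come from this file):
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_RX_PAUSE;
 *	if (ice_set_fc(pi, &aq_failures, true))
 *		dev_err(dev, "set fc failed, stage 0x%x\n", aq_failures);
 */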

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them in the comparison.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}
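
/*
 * Typical flow built from the helpers above (sketch only; the real driver
 * heap-allocates pcaps to keep the stack small, and ICE_AQ_PHY_ENA_LINK is
 * assumed to be the desired knob): read the active configuration, flip one
 * bit, and write it back:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps = kzalloc(sizeof(*pcaps), ...);
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	if (!ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				 pcaps, NULL)) {
 *		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
 *		ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 *	}
 */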

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero. As a
 * result of this call, link status reporting becomes enabled
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06E9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] of the logical port number
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0 = topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	int status;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
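
/*
 * Sketch (illustrative): read the first 8 bytes of a module's page 0 at
 * the conventional 0xA0 address for logical port 0:
 *
 *	u8 id[8];
 *
 *	if (!ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id, sizeof(id),
 *			       false, NULL))
 *		dev_info(ice_hw_to_dev(hw), "SFF identifier: 0x%02x\n", id[0]);
 */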

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	int status;
	u8 *lut;

	if (!params)
		return -EINVAL;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return -EINVAL;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = -EINVAL;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		fallthrough;
	default:
		status = -EINVAL;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
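
/*
 * Sketch of a minimal RSS setup using the wrappers above (illustrative;
 * the real driver sizes the LUT from the VSI queue count, and hashed_key /
 * lut_buf are assumed to be prepared by the caller):
 *
 *	struct ice_aq_get_set_rss_lut_params lut_params = {
 *		.vsi_handle = vsi_handle,
 *		.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI,
 *		.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128,
 *		.lut = lut_buf,
 *	};
 *
 *	ice_aq_set_rss_key(hw, vsi_handle, &hashed_key);
 *	ice_aq_set_rss_lut(hw, &lut_params);
 */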

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE: Prior to calling add Tx LAN queue, the queue context should be
 * initialized (completion queue ID, quanta profile, cache profile and
 * packet shaper profile as applicable). Once the command completes, the
 * queues exist in HW and the driver can use them.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	/* set RD bit to indicate that command buffer is provided by the
	 * driver and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the
	 * driver and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;
	dest_byte |= src_byte;

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));
	dest_word |= cpu_to_le16(src_word);

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));
	dest_dword |= cpu_to_le32(src_dword);

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));
	dest_qword |= cpu_to_le64(src_qword);

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
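
/*
 * Sketch of how a context descriptor table drives ice_set_ctx()
 * (illustrative; the struct and field below are hypothetical, and
 * ICE_CTX_STORE is assumed to be the table-building macro used by the
 * driver's real tables such as the Tx LAN queue context table):
 *
 *	struct my_ctx { u16 base; };
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		ICE_CTX_STORE(my_ctx, base, 13, 0),
 *		{ 0 }
 *	};
 *
 *	ice_set_ctx(hw, (u8 *)&src, packed_buf, my_ctx_info);
 *
 * Each table entry says where a field lives in the source struct and at
 * which bit offset/width it must land in the packed HW image.
 */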

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	/* Mark that the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that :
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}
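
/*
 * Illustrative RDMA bring-up order using the helpers above and below
 * (a sketch; qset_id, tc_bitmap and max_rdmaqs are assumed to come from
 * the RDMA auxiliary driver):
 *
 *	u32 qset_teid;
 *
 *	ice_cfg_vsi_rdma(pi, vsi_handle, tc_bitmap, max_rdmaqs);
 *	ice_ena_vsi_rdma_qset(pi, vsi_handle, 0, &qset_id, 1, &qset_teid);
 *	...
 *	ice_dis_vsi_rdma_qset(pi, 1, &qset_teid, &qset_id);
 */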

/**
 * ice_ena_vsi_rdma_qset - enable a VSI RDMA Qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_hw *hw;
	int status = 0;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	qg_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(qg_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Function is called after replay is completed.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 32 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
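
/*
 * Usage sketch (illustrative; GLPRT_GORCL and the prev/cur bookkeeping are
 * assumptions modeled on the driver's stats-refresh path): a periodic poll
 * keeps a previous/current pair per counter and lets the helper handle the
 * 40-bit wraparound:
 *
 *	ice_stat_update40(hw, GLPRT_GORCL(port), stats_loaded,
 *			  &prev->rx_bytes, &cur->rx_bytes);
 */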

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_set_driver_param - Set driver parameter to share via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to set
 * @value: the value to set the parameter to
 * @cd: pointer to command details structure or NULL
 *
 * Set the value of one of the software defined parameters. All PFs connected
 * to this device can read the value using ice_aq_get_driver_param.
 *
 * Note that firmware provides no synchronization or locking, and will not
 * save the parameter value during a device reset. It is expected that
 * a single PF will write the parameter value, while all other PFs will only
 * read it.
 */
int
ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
	cmd->param_indx = idx;
	cmd->param_val = cpu_to_le32(value);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_driver_param - Get driver parameter shared via firmware
 * @hw: pointer to the HW struct
 * @idx: parameter index to get
 * @value: storage to return the shared parameter
 * @cd: pointer to command details structure or NULL
 *
 * Get the value of one of the software defined parameters.
 *
 * Note that firmware provides no synchronization or locking. It is expected
 * that only a single PF will write a given parameter.
 */
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
			u32 *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_shared_params *cmd;
	struct ice_aq_desc desc;
	int status;

	if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
		return -EIO;

	cmd = &desc.params.drv_shared_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);

	cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
	cmd->param_indx = idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = le32_to_cpu(cmd->param_val);

	return 0;
}
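
/*
 * Illustrative use (a sketch; ICE_AQC_DRIVER_PARAM_CLK_IDX is assumed to be
 * one of the enum ice_aqc_driver_params values used to share the PTP clock
 * index between PFs):
 *
 *	u32 clk_idx;
 *
 *	if (!ice_aq_get_driver_param(hw, ICE_AQC_DRIVER_PARAM_CLK_IDX,
 *				     &clk_idx, NULL))
 *		use_shared_clock(clk_idx);
 */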

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provide IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the
 * topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}
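
/*
 * Sketch (illustrative handle/pin values): read a topology GPIO pin and
 * invert it:
 *
 *	bool val;
 *
 *	if (!ice_aq_get_gpio(hw, gpio_handle, 2, &val, NULL))
 *		ice_aq_set_gpio(hw, gpio_handle, 2, !val, NULL);
 */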

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if firmware supports report
 * default configuration
 * @hw: pointer to the hardware structure
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}