1
2
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7#include "ice_flow.h"
8
9#define ICE_PF_RESET_WAIT_COUNT 300
10
11
12
13
14
15
16
17
18static enum ice_status ice_set_mac_type(struct ice_hw *hw)
19{
20 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
21 return ICE_ERR_DEVICE_NOT_SUPPORTED;
22
23 switch (hw->device_id) {
24 case ICE_DEV_ID_E810C_BACKPLANE:
25 case ICE_DEV_ID_E810C_QSFP:
26 case ICE_DEV_ID_E810C_SFP:
27 case ICE_DEV_ID_E810_XXV_SFP:
28 hw->mac_type = ICE_MAC_E810;
29 break;
30 case ICE_DEV_ID_E823C_10G_BASE_T:
31 case ICE_DEV_ID_E823C_BACKPLANE:
32 case ICE_DEV_ID_E823C_QSFP:
33 case ICE_DEV_ID_E823C_SFP:
34 case ICE_DEV_ID_E823C_SGMII:
35 case ICE_DEV_ID_E822C_10G_BASE_T:
36 case ICE_DEV_ID_E822C_BACKPLANE:
37 case ICE_DEV_ID_E822C_QSFP:
38 case ICE_DEV_ID_E822C_SFP:
39 case ICE_DEV_ID_E822C_SGMII:
40 case ICE_DEV_ID_E822L_10G_BASE_T:
41 case ICE_DEV_ID_E822L_BACKPLANE:
42 case ICE_DEV_ID_E822L_SFP:
43 case ICE_DEV_ID_E822L_SGMII:
44 case ICE_DEV_ID_E823L_10G_BASE_T:
45 case ICE_DEV_ID_E823L_1GBE:
46 case ICE_DEV_ID_E823L_BACKPLANE:
47 case ICE_DEV_ID_E823L_QSFP:
48 case ICE_DEV_ID_E823L_SFP:
49 hw->mac_type = ICE_MAC_GENERIC;
50 break;
51 default:
52 hw->mac_type = ICE_MAC_UNKNOWN;
53 break;
54 }
55
56 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
57 return 0;
58}
59
60
61
62
63
64
65
66
67enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
68{
69 struct ice_aq_desc desc;
70
71 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
72
73 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
74}
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91static enum ice_status
92ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
93 struct ice_sq_cd *cd)
94{
95 struct ice_aqc_manage_mac_read_resp *resp;
96 struct ice_aqc_manage_mac_read *cmd;
97 struct ice_aq_desc desc;
98 enum ice_status status;
99 u16 flags;
100 u8 i;
101
102 cmd = &desc.params.mac_read;
103
104 if (buf_size < sizeof(*resp))
105 return ICE_ERR_BUF_TOO_SHORT;
106
107 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
108
109 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
110 if (status)
111 return status;
112
113 resp = buf;
114 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
115
116 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
117 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
118 return ICE_ERR_CFG;
119 }
120
121
122 for (i = 0; i < cmd->num_addr; i++)
123 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
124 ether_addr_copy(hw->port_info->mac.lan_addr,
125 resp[i].mac_addr);
126 ether_addr_copy(hw->port_info->mac.perm_addr,
127 resp[i].mac_addr);
128 break;
129 }
130
131 return 0;
132}
133
134
135
136
137
138
139
140
141
142
143
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities (one of the ICE_AQC_REPORT_* values)
 * @pcaps: caller-allocated structure the response is written into
 * @cd: pointer to command details structure or NULL
 *
 * Issues the Get PHY Capabilities AQ command.  On success with
 * ICE_AQC_REPORT_TOPO_CAP_MEDIA, also caches the reported PHY types and
 * module type into @pi.
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Default-config reporting needs firmware support; reject otherwise */
	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	/* NOTE(review): these debug dumps run even when the AQ command
	 * failed, so they may print whatever the caller left in @pcaps —
	 * harmless for debug output, but confirm before relying on it.
	 */
	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	/* Cache topology/media capabilities on the port on success */
	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}
207
208
209
210
211
212
213
214
215
216
217
218
219
220static enum ice_status
221ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
222 struct ice_sq_cd *cd)
223{
224 struct ice_aqc_get_link_topo *cmd;
225 struct ice_aq_desc desc;
226
227 cmd = &desc.params.get_link_topo;
228
229 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
230
231 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
232 ICE_AQC_LINK_TOPO_NODE_CTX_S);
233
234
235 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
236
237 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
238}
239
240
241
242
243
244
245
246
247static bool ice_is_media_cage_present(struct ice_port_info *pi)
248{
249
250
251
252
253 return !ice_aq_get_link_topo_handle(pi,
254 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
255 NULL);
256}
257
258
259
260
261
/**
 * ice_get_media_type - determine the media type from the current link info
 * @pi: port information structure
 *
 * Classifies pi->phy.link_info's PHY type into DA (direct attach), fiber,
 * BASE-T, backplane, or unknown.  Exactly one of phy_type_low /
 * phy_type_high is expected to be set; both set is treated as unknown.
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII through an active/passive copper SFP module is
		 * really a DA cable, even though the PHY type says SGMII.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		    ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			/* AUI/CAUI through a media cage means a DA cable;
			 * otherwise treat it as backplane below.
			 */
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			/* Same cage rule as the phy_type_low AUI cases */
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
374
375
376
377
378
379
380
381
382
383
/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable Link Status Event reporting in firmware
 * @link: optional output; if non-NULL receives a copy of the link status
 * @cd: pointer to command details structure or NULL
 *
 * Issues the Get Link Status AQ command, snapshots the previous link info
 * into pi->phy.link_info_old, then refreshes pi->phy.link_info, the media
 * type, and the negotiated flow-control mode from the firmware response.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	/* must be derived after phy_type_low/high are updated above */
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info from the negotiated pause bits */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	/* firmware echoes whether LSE ended up enabled in the descriptor */
	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
474
475
476
477
478
479
480
481
482
/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx pause timer and FC refresh
 * @hw: pointer to the HW struct
 * @cmd: Set MAC Config AQ command being prepared
 *
 * Reads the link-flow-control pause quanta and refresh timer from the
 * PRTMAC registers and copies them into the Set MAC Config command, so
 * the values programmed by firmware/NVM are preserved across the command.
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* The LFC values live at the last index of the per-TC quanta
	 * register array.
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the pause quanta currently programmed for LFC */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the pause refresh timer currently programmed for LFC */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}
511
512
513
514
515
516
517
518
519
520enum ice_status
521ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
522{
523 struct ice_aqc_set_mac_cfg *cmd;
524 struct ice_aq_desc desc;
525
526 cmd = &desc.params.set_mac_cfg;
527
528 if (max_frame_size == 0)
529 return ICE_ERR_PARAM;
530
531 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
532
533 cmd->max_frame_size = cpu_to_le16(max_frame_size);
534
535 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
536
537 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
538}
539
540
541
542
543
544static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
545{
546 struct ice_switch_info *sw;
547 enum ice_status status;
548
549 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
550 sizeof(*hw->switch_info), GFP_KERNEL);
551 sw = hw->switch_info;
552
553 if (!sw)
554 return ICE_ERR_NO_MEMORY;
555
556 INIT_LIST_HEAD(&sw->vsi_list_map_head);
557
558 status = ice_init_def_sw_recp(hw);
559 if (status) {
560 devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
561 return status;
562 }
563 return 0;
564}
565
566
567
568
569
570static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
571{
572 struct ice_switch_info *sw = hw->switch_info;
573 struct ice_vsi_list_map_info *v_pos_map;
574 struct ice_vsi_list_map_info *v_tmp_map;
575 struct ice_sw_recipe *recps;
576 u8 i;
577
578 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
579 list_entry) {
580 list_del(&v_pos_map->list_entry);
581 devm_kfree(ice_hw_to_dev(hw), v_pos_map);
582 }
583 recps = hw->switch_info->recp_list;
584 for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
585 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
586
587 recps[i].root_rid = i;
588 mutex_destroy(&recps[i].filt_rule_lock);
589 list_for_each_entry_safe(lst_itr, tmp_entry,
590 &recps[i].filt_rules, list_entry) {
591 list_del(&lst_itr->list_entry);
592 devm_kfree(ice_hw_to_dev(hw), lst_itr);
593 }
594 }
595 ice_rm_all_sw_replay_rule_info(hw);
596 devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
597 devm_kfree(ice_hw_to_dev(hw), sw);
598}
599
600
601
602
603
604static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
605{
606 struct ice_aq_desc desc;
607 enum ice_status status;
608 __le16 *config;
609 u16 size;
610
611 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
612 config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
613 if (!config)
614 return ICE_ERR_NO_MEMORY;
615
616 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
617
618 status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
619 if (!status) {
620 u16 i;
621
622
623 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
624 u16 v, m, flgs;
625
626 v = le16_to_cpu(config[i]);
627 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
628 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
629
630 if (m < ICE_AQC_FW_LOG_ID_MAX)
631 hw->fw_log.evnts[m].cur = flgs;
632 }
633 }
634
635 devm_kfree(ice_hw_to_dev(hw), config);
636
637 return status;
638}
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
/**
 * ice_cfg_fw_log - enable or disable FW logging
 * @hw: pointer to the HW struct
 * @enable: true to (re)program enabled events, false to disable logging
 *
 * When enabling, compares the desired per-module config (evnts[].cfg)
 * against the current state (evnts[].cur) and sends only the deltas to
 * firmware.  When disabling, sends the command with no event buffer and
 * clears all cached "cur" state on success.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	/* nothing to do if no logging destination is configured */
	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable is a no-op when nothing is active, or when the admin
	 * queue is already down (e.g. during teardown).
	 */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* refresh evnts[].cur from firmware before computing deltas */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* mark which logging destinations this command is updating */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Build a buffer of only the modules whose desired config
		 * differs from the current firmware state.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* track whether any module has events enabled */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			/* allocate lazily, only when a delta exists */
			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* If any events are desired, enable the destinations and
		 * attach the delta buffer; with no deltas there is nothing
		 * to send.  If no events are desired at all, fall through
		 * and send the command without a buffer (disables logging).
		 */
		if (actv_evnts) {
			/* isolate if no change in logging is needed */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* On enable, only the entries actually sent changed; on
		 * disable, every module's cached state must be cleared.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* disable path: no buffer was sent, so just
				 * clear the cached current state
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			/* mirror the accepted delta into evnts[].cur */
			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
794
795
796
797
798
799
800
801
802
803void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
804{
805 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
806 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
807 le16_to_cpu(desc->datalen));
808 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
809}
810
811
812
813
814
815
816
817
818static void ice_get_itr_intrl_gran(struct ice_hw *hw)
819{
820 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
821 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
822 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
823
824 switch (max_agg_bw) {
825 case ICE_MAX_AGG_BW_200G:
826 case ICE_MAX_AGG_BW_100G:
827 case ICE_MAX_AGG_BW_50G:
828 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
829 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
830 break;
831 case ICE_MAX_AGG_BW_25G:
832 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
833 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
834 break;
835 }
836}
837
838
839
840
841
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 *
 * Performs the full device bring-up: MAC type detection, PF reset, control
 * queue creation, NVM/capabilities discovery, port/scheduler init, filter
 * management, MAC address read, and HW table init.  On any failure the
 * goto labels unwind exactly the resources acquired so far.
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information; allow for two resp entries: port and wake-on-
	 * LAN MAC addresses.
	 */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* NOTE(review): if ice_init_hw_tbls() fails, the FD resource counter
	 * allocated just above is not released on this error path — looks
	 * like a small leak; confirm against ice_deinit_hw/err handling.
	 */
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
997
998
999
1000
1001
1002
1003
1004
1005
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * Releases resources in (roughly) reverse order of ice_init_hw: FD
 * counters, filter management, scheduler, segment/table memory, port
 * info, FW logging, control queues, and cached VSI contexts.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues;
	 * the admin queue must still be alive for this to take effect.
	 */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
1029
1030
1031
1032
1033
/**
 * ice_check_reset - check whether a device reset has completed
 * @hw: pointer to the hardware structure
 *
 * First polls GLGEN_RSTAT until the device leaves the reset state, then
 * polls GLNVM_ULD until every reset-done bit in ICE_RESET_DONE_MASK is
 * set.  Returns ICE_ERR_RESET_FAILED on either timeout.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

/* Bits firmware sets in GLNVM_ULD once the corresponding reset/unload
 * stage has finished.  Also used by ice_pf_reset() below.
 */
#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
1085
1086
1087
1088
1089
1090
1091
1092
1093static enum ice_status ice_pf_reset(struct ice_hw *hw)
1094{
1095 u32 cnt, reg;
1096
1097
1098
1099
1100
1101
1102 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1103 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1104
1105 if (ice_check_reset(hw))
1106 return ICE_ERR_RESET_FAILED;
1107
1108 return 0;
1109 }
1110
1111
1112 reg = rd32(hw, PFGEN_CTRL);
1113
1114 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1115
1116
1117
1118
1119
1120 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
1121 ICE_PF_RESET_WAIT_COUNT; cnt++) {
1122 reg = rd32(hw, PFGEN_CTRL);
1123 if (!(reg & PFGEN_CTRL_PFSWR_M))
1124 break;
1125
1126 mdelay(1);
1127 }
1128
1129 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
1130 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1131 return ICE_ERR_RESET_FAILED;
1132 }
1133
1134 return 0;
1135}
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1150{
1151 u32 val = 0;
1152
1153 switch (req) {
1154 case ICE_RESET_PFR:
1155 return ice_pf_reset(hw);
1156 case ICE_RESET_CORER:
1157 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1158 val = GLGEN_RTRIG_CORER_M;
1159 break;
1160 case ICE_RESET_GLOBR:
1161 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1162 val = GLGEN_RTRIG_GLOBR_M;
1163 break;
1164 default:
1165 return ICE_ERR_PARAM;
1166 }
1167
1168 val |= rd32(hw, GLGEN_RTRIG);
1169 wr32(hw, GLGEN_RTRIG, val);
1170 ice_flush(hw);
1171
1172
1173 return ice_check_reset(hw);
1174}
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184static enum ice_status
1185ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1186{
1187 u8 i;
1188
1189 if (!ice_rxq_ctx)
1190 return ICE_ERR_BAD_PTR;
1191
1192 if (rxq_index > QRX_CTRL_MAX_INDEX)
1193 return ICE_ERR_PARAM;
1194
1195
1196 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1197 wr32(hw, QRX_CONTEXT(i, rxq_index),
1198 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1199
1200 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1201 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1202 }
1203
1204 return 0;
1205}
1206
1207
/* LAN Rx Queue Context: field name, width in bits, LSB position within the
 * packed hardware context image.  Consumed by ice_set_ctx() when packing
 * struct ice_rlan_ctx into the buffer written by ice_copy_rxq_ctx_to_hw().
 */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243enum ice_status
1244ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1245 u32 rxq_index)
1246{
1247 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1248
1249 if (!rlan_ctx)
1250 return ICE_ERR_BAD_PTR;
1251
1252 rlan_ctx->prefena = 1;
1253
1254 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1255 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1256}
1257
1258
/* LAN Tx Queue Context: field name, width in bits, LSB position within the
 * packed hardware context image.  Exported (non-static) for use by the Tx
 * queue configuration code elsewhere in the driver.
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
1291
1292
1293
1294
1295
1296
1297
1298DEFINE_MUTEX(ice_global_cfg_lock_sw);
1299
1300
1301
1302
1303
1304
1305
1306
1307static bool ice_should_retry_sq_send_cmd(u16 opcode)
1308{
1309 switch (opcode) {
1310 case ice_aqc_opc_get_link_topo:
1311 case ice_aqc_opc_lldp_stop:
1312 case ice_aqc_opc_lldp_start:
1313 case ice_aqc_opc_lldp_filter_ctrl:
1314 return true;
1315 }
1316
1317 return false;
1318}
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
/**
 * ice_sq_send_cmd_retry - send a command to the control queue, with retries
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific control queue
 * @desc: prefilled descriptor describing the command
 * @buf: optional command buffer (may be NULL)
 * @buf_size: size of @buf in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Sends the command; for retry-safe opcodes that fail with EBUSY, restores
 * the original descriptor/buffer (firmware may have overwritten them with
 * the response) and resends, up to ICE_SQ_SEND_MAX_EXECUTE attempts.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	/* snapshot descriptor and buffer so a retry can start fresh */
	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		/* NOTE(review): the EBUSY check reads hw->adminq rather than
		 * the @cq argument; equivalent when callers only pass
		 * &hw->adminq (as ice_aq_send_cmd does) — confirm before
		 * reusing this helper with a different queue.
		 */
		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		/* restore caller's original request before resending */
		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389enum ice_status
1390ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1391 u16 buf_size, struct ice_sq_cd *cd)
1392{
1393 struct ice_aqc_req_res *cmd = &desc->params.res_owner;
1394 bool lock_acquired = false;
1395 enum ice_status status;
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405 switch (le16_to_cpu(desc->opcode)) {
1406 case ice_aqc_opc_download_pkg:
1407 case ice_aqc_opc_get_pkg_info_list:
1408 case ice_aqc_opc_get_ver:
1409 break;
1410 case ice_aqc_opc_release_res:
1411 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
1412 break;
1413 fallthrough;
1414 default:
1415 mutex_lock(&ice_global_cfg_lock_sw);
1416 lock_acquired = true;
1417 break;
1418 }
1419
1420 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1421 if (lock_acquired)
1422 mutex_unlock(&ice_global_cfg_lock_sw);
1423
1424 return status;
1425}
1426
1427
1428
1429
1430
1431
1432
1433
1434enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1435{
1436 struct ice_aqc_get_ver *resp;
1437 struct ice_aq_desc desc;
1438 enum ice_status status;
1439
1440 resp = &desc.params.get_ver;
1441
1442 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1443
1444 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1445
1446 if (!status) {
1447 hw->fw_branch = resp->fw_branch;
1448 hw->fw_maj_ver = resp->fw_major;
1449 hw->fw_min_ver = resp->fw_minor;
1450 hw->fw_patch = resp->fw_patch;
1451 hw->fw_build = le32_to_cpu(resp->fw_build);
1452 hw->api_branch = resp->api_branch;
1453 hw->api_maj_ver = resp->api_major;
1454 hw->api_min_ver = resp->api_minor;
1455 hw->api_patch = resp->api_patch;
1456 }
1457
1458 return status;
1459}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469enum ice_status
1470ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1471 struct ice_sq_cd *cd)
1472{
1473 struct ice_aqc_driver_ver *cmd;
1474 struct ice_aq_desc desc;
1475 u16 len;
1476
1477 cmd = &desc.params.driver_ver;
1478
1479 if (!dv)
1480 return ICE_ERR_PARAM;
1481
1482 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1483
1484 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1485 cmd->major_ver = dv->major_ver;
1486 cmd->minor_ver = dv->minor_ver;
1487 cmd->build_ver = dv->build_ver;
1488 cmd->subbuild_ver = dv->subbuild_ver;
1489
1490 len = 0;
1491 while (len < sizeof(dv->driver_string) &&
1492 isascii(dv->driver_string[len]) && dv->driver_string[len])
1493 len++;
1494
1495 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1496}
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1507{
1508 struct ice_aqc_q_shutdown *cmd;
1509 struct ice_aq_desc desc;
1510
1511 cmd = &desc.params.q_shutdown;
1512
1513 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1514
1515 if (unloading)
1516 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1517
1518 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1519}
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547static enum ice_status
1548ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1549 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1550 struct ice_sq_cd *cd)
1551{
1552 struct ice_aqc_req_res *cmd_resp;
1553 struct ice_aq_desc desc;
1554 enum ice_status status;
1555
1556 cmd_resp = &desc.params.res_owner;
1557
1558 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1559
1560 cmd_resp->res_id = cpu_to_le16(res);
1561 cmd_resp->access_type = cpu_to_le16(access);
1562 cmd_resp->res_number = cpu_to_le32(sdp_number);
1563 cmd_resp->timeout = cpu_to_le32(*timeout);
1564 *timeout = 0;
1565
1566 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1580 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1581 *timeout = le32_to_cpu(cmd_resp->timeout);
1582 return 0;
1583 } else if (le16_to_cpu(cmd_resp->status) ==
1584 ICE_AQ_RES_GLBL_IN_PROG) {
1585 *timeout = le32_to_cpu(cmd_resp->timeout);
1586 return ICE_ERR_AQ_ERROR;
1587 } else if (le16_to_cpu(cmd_resp->status) ==
1588 ICE_AQ_RES_GLBL_DONE) {
1589 return ICE_ERR_AQ_NO_WORK;
1590 }
1591
1592
1593 *timeout = 0;
1594 return ICE_ERR_AQ_ERROR;
1595 }
1596
1597
1598
1599
1600
1601 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1602 *timeout = le32_to_cpu(cmd_resp->timeout);
1603
1604 return status;
1605}
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616static enum ice_status
1617ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1618 struct ice_sq_cd *cd)
1619{
1620 struct ice_aqc_req_res *cmd;
1621 struct ice_aq_desc desc;
1622
1623 cmd = &desc.params.res_owner;
1624
1625 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1626
1627 cmd->res_id = cpu_to_le16(res);
1628 cmd->res_number = cpu_to_le32(sdp_number);
1629
1630 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1631}
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
/**
 * ice_acquire_res - acquire a common resource, polling until granted
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: overall timeout in milliseconds for the acquisition
 *
 * Requests the resource; if it is busy, re-requests every
 * ICE_RES_POLLING_DELAY_MS until either the resource is granted, the FW
 * reports no work to do (ICE_ERR_AQ_NO_WORK), or the timeout expires.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* ICE_ERR_AQ_NO_WORK means the resource was already acquired and
	 * released by another entity; there is nothing left to do here.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* poll until granted, done, or the caller's timeout is exhausted */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* resource held and released by another entity */
			break;

		if (!status)
			/* success - resource acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
1691
1692
1693
1694
1695
1696
1697
1698
1699void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1700{
1701 enum ice_status status;
1702 u32 total_delay = 0;
1703
1704 status = ice_aq_release_res(hw, res, 0, NULL);
1705
1706
1707
1708
1709 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1710 (total_delay < hw->adminq.sq_cmd_timeout)) {
1711 mdelay(1);
1712 status = ice_aq_release_res(hw, res, 0, NULL);
1713 total_delay++;
1714 }
1715}
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728enum ice_status
1729ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1730 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1731 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1732{
1733 struct ice_aqc_alloc_free_res_cmd *cmd;
1734 struct ice_aq_desc desc;
1735
1736 cmd = &desc.params.sw_res_ctrl;
1737
1738 if (!buf)
1739 return ICE_ERR_PARAM;
1740
1741 if (buf_size < flex_array_size(buf, elem, num_entries))
1742 return ICE_ERR_PARAM;
1743
1744 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1745
1746 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1747
1748 cmd->num_entries = cpu_to_le16(num_entries);
1749
1750 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1751}
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761enum ice_status
1762ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1763{
1764 struct ice_aqc_alloc_free_res_elem *buf;
1765 enum ice_status status;
1766 u16 buf_len;
1767
1768 buf_len = struct_size(buf, elem, num);
1769 buf = kzalloc(buf_len, GFP_KERNEL);
1770 if (!buf)
1771 return ICE_ERR_NO_MEMORY;
1772
1773
1774 buf->num_elems = cpu_to_le16(num);
1775 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1776 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1777 if (btm)
1778 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1779
1780 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1781 ice_aqc_opc_alloc_res, NULL);
1782 if (status)
1783 goto ice_alloc_res_exit;
1784
1785 memcpy(res, buf->elem, sizeof(*buf->elem) * num);
1786
1787ice_alloc_res_exit:
1788 kfree(buf);
1789 return status;
1790}
1791
1792
1793
1794
1795
1796
1797
1798
1799enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1800{
1801 struct ice_aqc_alloc_free_res_elem *buf;
1802 enum ice_status status;
1803 u16 buf_len;
1804
1805 buf_len = struct_size(buf, elem, num);
1806 buf = kzalloc(buf_len, GFP_KERNEL);
1807 if (!buf)
1808 return ICE_ERR_NO_MEMORY;
1809
1810
1811 buf->num_elems = cpu_to_le16(num);
1812 buf->res_type = cpu_to_le16(type);
1813 memcpy(buf->elem, res, sizeof(*buf->elem) * num);
1814
1815 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1816 ice_aqc_opc_free_res, NULL);
1817 if (status)
1818 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1819
1820 kfree(buf);
1821 return status;
1822}
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1834{
1835 u8 funcs;
1836
1837#define ICE_CAPS_VALID_FUNCS_M 0xFF
1838 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1839 ICE_CAPS_VALID_FUNCS_M);
1840
1841 if (!funcs)
1842 return 0;
1843
1844 return max / funcs;
1845}
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
/**
 * ice_parse_common_caps - parse a capability element common to dev and func
 * @hw: pointer to the HW struct
 * @caps: common capability structure to populate
 * @elem: capability element returned by firmware
 * @prefix: debug-message prefix (callers pass "dev caps" / "func caps")
 *
 * Return: true if the capability ID was recognized and stored in @caps,
 * false so the caller can attempt its own dev/func-specific parsing.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		/* a new NVM image is staged and awaiting activation */
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* not a common capability; let the caller handle it */
		found = false;
	}

	return found;
}
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963static void
1964ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
1965{
1966
1967
1968
1969 if (hw->dev_caps.num_funcs > 4) {
1970
1971 caps->maxtc = 4;
1972 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
1973 caps->maxtc);
1974 }
1975}
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985static void
1986ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
1987 struct ice_aqc_list_caps_elem *cap)
1988{
1989 u32 logical_id = le32_to_cpu(cap->logical_id);
1990 u32 number = le32_to_cpu(cap->number);
1991
1992 func_p->num_allocd_vfs = number;
1993 func_p->vf_base_id = logical_id;
1994 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
1995 func_p->num_allocd_vfs);
1996 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
1997 func_p->vf_base_id);
1998}
1999
2000
2001
2002
2003
2004
2005
2006
2007
/**
 * ice_parse_vsi_func_caps - parse the ICE_AQC_CAPS_VSI function capability
 * @hw: pointer to the HW struct
 * @func_p: function capability structure to populate
 * @cap: capability element (used only for the debug print of the FW value)
 *
 * The FW-reported count is logged but not used; the guaranteed VSI count is
 * instead ICE_MAX_VSI split evenly across the valid functions.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  le32_to_cpu(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}
2018
2019
2020
2021
2022
2023
2024
2025
2026static void
2027ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2028{
2029 u32 reg_val, val;
2030
2031 reg_val = rd32(hw, GLQF_FD_SIZE);
2032 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
2033 GLQF_FD_SIZE_FD_GSIZE_S;
2034 func_p->fd_fltr_guar =
2035 ice_get_num_per_func(hw, val);
2036 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
2037 GLQF_FD_SIZE_FD_BSIZE_S;
2038 func_p->fd_fltr_best_effort = val;
2039
2040 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2041 func_p->fd_fltr_guar);
2042 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2043 func_p->fd_fltr_best_effort);
2044}
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
/**
 * ice_parse_func_caps - parse the function capability list from firmware
 * @hw: pointer to the HW struct
 * @func_p: function capability structure to populate (zeroed first)
 * @buf: buffer of ice_aqc_list_caps_elem entries returned by FW
 * @cap_count: number of elements in @buf
 *
 * Each element is first offered to the common parser; function-specific
 * capability IDs (VF, VSI, FD) are then handled here. Unrecognized IDs are
 * only logged.
 */
static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		    void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(func_p, 0, sizeof(*func_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &func_p->common_cap,
					      &cap_resp[i], "func caps");

		switch (cap) {
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_func_caps(hw, func_p);
			break;
		default:
			/* not handled here nor by the common parser */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
}
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108static void
2109ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2110 struct ice_aqc_list_caps_elem *cap)
2111{
2112 u32 number = le32_to_cpu(cap->number);
2113
2114 dev_p->num_funcs = hweight32(number);
2115 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2116 dev_p->num_funcs);
2117}
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127static void
2128ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2129 struct ice_aqc_list_caps_elem *cap)
2130{
2131 u32 number = le32_to_cpu(cap->number);
2132
2133 dev_p->num_vfs_exposed = number;
2134 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2135 dev_p->num_vfs_exposed);
2136}
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146static void
2147ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2148 struct ice_aqc_list_caps_elem *cap)
2149{
2150 u32 number = le32_to_cpu(cap->number);
2151
2152 dev_p->num_vsi_allocd_to_host = number;
2153 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2154 dev_p->num_vsi_allocd_to_host);
2155}
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165static void
2166ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2167 struct ice_aqc_list_caps_elem *cap)
2168{
2169 u32 number = le32_to_cpu(cap->number);
2170
2171 dev_p->num_flow_director_fltr = number;
2172 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2173 dev_p->num_flow_director_fltr);
2174}
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
/**
 * ice_parse_dev_caps - parse the device capability list from firmware
 * @hw: pointer to the HW struct
 * @dev_p: device capability structure to populate (zeroed first)
 * @buf: buffer of ice_aqc_list_caps_elem entries returned by FW
 * @cap_count: number of elements in @buf
 *
 * Each element is first offered to the common parser; device-specific
 * capability IDs (valid functions, VF, VSI, FD) are then handled here.
 * Unrecognized IDs are only logged.
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* not handled here nor by the common parser */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252enum ice_status
2253ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2254 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2255{
2256 struct ice_aqc_list_caps *cmd;
2257 struct ice_aq_desc desc;
2258 enum ice_status status;
2259
2260 cmd = &desc.params.get_cap;
2261
2262 if (opc != ice_aqc_opc_list_func_caps &&
2263 opc != ice_aqc_opc_list_dev_caps)
2264 return ICE_ERR_PARAM;
2265
2266 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2267 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2268
2269 if (cap_count)
2270 *cap_count = le32_to_cpu(cmd->count);
2271
2272 return status;
2273}
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283enum ice_status
2284ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2285{
2286 enum ice_status status;
2287 u32 cap_count = 0;
2288 void *cbuf;
2289
2290 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2291 if (!cbuf)
2292 return ICE_ERR_NO_MEMORY;
2293
2294
2295
2296
2297
2298 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2299
2300 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2301 ice_aqc_opc_list_dev_caps, NULL);
2302 if (!status)
2303 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2304 kfree(cbuf);
2305
2306 return status;
2307}
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317static enum ice_status
2318ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2319{
2320 enum ice_status status;
2321 u32 cap_count = 0;
2322 void *cbuf;
2323
2324 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
2325 if (!cbuf)
2326 return ICE_ERR_NO_MEMORY;
2327
2328
2329
2330
2331
2332 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2333
2334 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2335 ice_aqc_opc_list_func_caps, NULL);
2336 if (!status)
2337 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2338 kfree(cbuf);
2339
2340 return status;
2341}
2342
2343
2344
2345
2346
/**
 * ice_set_safe_mode_caps - override capabilities for safe mode operation
 * @hw: pointer to the HW struct
 *
 * Resets func/dev capabilities to a minimal configuration (one Tx/Rx queue,
 * one VSI, two MSI-X vectors per function), while preserving the fields
 * that still describe the hardware: function bitmap, first queue/vector
 * IDs, max MTU, NVM update state, and the function count.
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* save fields to restore after zeroing the function caps */
	cached_caps = func_caps->common_cap;

	/* zero everything else */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore HW-describing fields */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSI-X vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* save fields to restore after zeroing the device caps */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* zero everything else */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore HW-describing fields */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSI-X vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}
2411
2412
2413
2414
2415
2416enum ice_status ice_get_caps(struct ice_hw *hw)
2417{
2418 enum ice_status status;
2419
2420 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2421 if (status)
2422 return status;
2423
2424 return ice_discover_func_caps(hw, &hw->func_caps);
2425}
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436enum ice_status
2437ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2438 struct ice_sq_cd *cd)
2439{
2440 struct ice_aqc_manage_mac_write *cmd;
2441 struct ice_aq_desc desc;
2442
2443 cmd = &desc.params.mac_write;
2444 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2445
2446 cmd->flags = flags;
2447 ether_addr_copy(cmd->mac_addr, mac_addr);
2448
2449 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2450}
2451
2452
2453
2454
2455
2456
2457
/**
 * ice_aq_clear_pxe_mode - tell firmware to exit PXE mode
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
2467
2468
2469
2470
2471
2472
2473
2474
2475void ice_clear_pxe_mode(struct ice_hw *hw)
2476{
2477 if (ice_check_sq_alive(hw, &hw->adminq))
2478 ice_aq_clear_pxe_mode(hw);
2479}
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
/**
 * ice_get_link_speed_based_on_phy_type - map PHY type bits to a link speed
 * @phy_type_low: low 64 bits of PHY type (exactly one bit, or 0)
 * @phy_type_high: high 64 bits of PHY type (exactly one bit, or 0)
 *
 * Return: the ICE_AQ_LINK_SPEED_* value implied by the single PHY type bit
 * set in either @phy_type_low or @phy_type_high. If neither maps to a
 * speed, or both do (ambiguous input), ICE_AQ_LINK_SPEED_UNKNOWN.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* exactly one of the two halves must resolve to a known speed */
	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629void
2630ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2631 u16 link_speeds_bitmap)
2632{
2633 u64 pt_high;
2634 u64 pt_low;
2635 int index;
2636 u16 speed;
2637
2638
2639 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2640 pt_low = BIT_ULL(index);
2641 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2642
2643 if (link_speeds_bitmap & speed)
2644 *phy_type_low |= BIT_ULL(index);
2645 }
2646
2647
2648 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2649 pt_high = BIT_ULL(index);
2650 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2651
2652 if (link_speeds_bitmap & speed)
2653 *phy_type_high |= BIT_ULL(index);
2654 }
2655}
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
/**
 * ice_aq_set_phy_cfg - apply a PHY configuration via AQ
 * @hw: pointer to the HW struct
 * @pi: port information structure
 * @cfg: PHY configuration to apply (sent as command data)
 * @cd: pointer to command details structure or NULL
 *
 * Sanitizes invalid capability bits, sends the set-PHY-config command, and
 * on success caches @cfg as the current user-requested configuration.
 * An EMODE response from FW is treated as success.
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* mask off any capability bits outside the valid set */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	/* EMODE means the config cannot take effect in the current mode;
	 * treat it as success rather than an error.
	 */
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}
2713
2714
2715
2716
2717
2718enum ice_status ice_update_link_info(struct ice_port_info *pi)
2719{
2720 struct ice_link_status *li;
2721 enum ice_status status;
2722
2723 if (!pi)
2724 return ICE_ERR_PARAM;
2725
2726 li = &pi->phy.link_info;
2727
2728 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2729 if (status)
2730 return status;
2731
2732 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2733 struct ice_aqc_get_phy_caps_data *pcaps;
2734 struct ice_hw *hw;
2735
2736 hw = pi->hw;
2737 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2738 GFP_KERNEL);
2739 if (!pcaps)
2740 return ICE_ERR_NO_MEMORY;
2741
2742 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2743 pcaps, NULL);
2744
2745 devm_kfree(ice_hw_to_dev(hw), pcaps);
2746 }
2747
2748 return status;
2749}
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759static void
2760ice_cache_phy_user_req(struct ice_port_info *pi,
2761 struct ice_phy_cache_mode_data cache_data,
2762 enum ice_phy_cache_mode cache_mode)
2763{
2764 if (!pi)
2765 return;
2766
2767 switch (cache_mode) {
2768 case ICE_FC_MODE:
2769 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2770 break;
2771 case ICE_SPEED_MODE:
2772 pi->phy.curr_user_speed_req =
2773 cache_data.data.curr_user_speed_req;
2774 break;
2775 case ICE_FEC_MODE:
2776 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2777 break;
2778 default:
2779 break;
2780 }
2781}
2782
2783
2784
2785
2786
2787
2788
2789enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2790{
2791 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2792 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2793 return ICE_FC_FULL;
2794
2795 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2796 return ICE_FC_TX_PAUSE;
2797
2798 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2799 return ICE_FC_RX_PAUSE;
2800
2801 return ICE_FC_NONE;
2802}
2803
2804
2805
2806
2807
2808
2809
2810
2811enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2812{
2813 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2814 return ICE_FEC_AUTO;
2815
2816 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2817 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2818 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2819 ICE_AQC_PHY_FEC_25G_KR_REQ))
2820 return ICE_FEC_BASER;
2821
2822 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2823 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2824 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2825 return ICE_FEC_RS;
2826
2827 return ICE_FEC_NONE;
2828}
2829
2830
2831
2832
2833
2834
2835
2836enum ice_status
2837ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2838 enum ice_fc_mode req_mode)
2839{
2840 struct ice_phy_cache_mode_data cache_data;
2841 u8 pause_mask = 0x0;
2842
2843 if (!pi || !cfg)
2844 return ICE_ERR_BAD_PTR;
2845
2846 switch (req_mode) {
2847 case ICE_FC_FULL:
2848 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2849 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2850 break;
2851 case ICE_FC_RX_PAUSE:
2852 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2853 break;
2854 case ICE_FC_TX_PAUSE:
2855 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2856 break;
2857 default:
2858 break;
2859 }
2860
2861
2862 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2863 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2864
2865
2866 cfg->caps |= pause_mask;
2867
2868
2869 cache_data.data.curr_user_fc_req = req_mode;
2870 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2871
2872 return 0;
2873}
2874
2875
2876
2877
2878
2879
2880
2881
2882
/**
 * ice_set_fc - set the requested flow control mode on a port
 * @pi: port information structure
 * @aq_failures: set to an ICE_SET_FC_AQ_FAIL_* flag on AQ failure
 * @ena_auto_link_update: enable automatic link update after the set
 *
 * Reads the active PHY config, folds in the port's requested FC mode,
 * and applies it via Set PHY Config only when the caps actually change.
 * After a successful set, polls (up to 10 x 100 ms) until link info can
 * be refreshed. @aq_failures tells the caller which AQ step failed.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info. It can take up to roughly a
		 * second for link updates to become available, so keep
		 * retrying with a short delay between attempts.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961bool
2962ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
2963 struct ice_aqc_set_phy_cfg_data *phy_cfg)
2964{
2965 u8 caps_mask, cfg_mask;
2966
2967 if (!phy_caps || !phy_cfg)
2968 return false;
2969
2970
2971
2972
2973 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
2974 ICE_AQC_GET_PHY_EN_MOD_QUAL);
2975 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2976
2977 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
2978 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
2979 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
2980 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
2981 phy_caps->eee_cap != phy_cfg->eee_cap ||
2982 phy_caps->eeer_value != phy_cfg->eeer_value ||
2983 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
2984 return false;
2985
2986 return true;
2987}
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998void
2999ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3000 struct ice_aqc_get_phy_caps_data *caps,
3001 struct ice_aqc_set_phy_cfg_data *cfg)
3002{
3003 if (!pi || !caps || !cfg)
3004 return;
3005
3006 memset(cfg, 0, sizeof(*cfg));
3007 cfg->phy_type_low = caps->phy_type_low;
3008 cfg->phy_type_high = caps->phy_type_high;
3009 cfg->caps = caps->caps;
3010 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3011 cfg->eee_cap = caps->eee_cap;
3012 cfg->eeer_value = caps->eeer_value;
3013 cfg->link_fec_opt = caps->link_fec_options;
3014 cfg->module_compliance_enforcement =
3015 caps->module_compliance_enforcement;
3016}
3017
3018
3019
3020
3021
3022
3023
3024enum ice_status
3025ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3026 enum ice_fec_mode fec)
3027{
3028 struct ice_aqc_get_phy_caps_data *pcaps;
3029 enum ice_status status;
3030 struct ice_hw *hw;
3031
3032 if (!pi || !cfg)
3033 return ICE_ERR_BAD_PTR;
3034
3035 hw = pi->hw;
3036
3037 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
3038 if (!pcaps)
3039 return ICE_ERR_NO_MEMORY;
3040
3041 status = ice_aq_get_phy_caps(pi, false,
3042 (ice_fw_supports_report_dflt_cfg(hw) ?
3043 ICE_AQC_REPORT_DFLT_CFG :
3044 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3045 if (status)
3046 goto out;
3047
3048 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
3049 cfg->link_fec_opt = pcaps->link_fec_options;
3050
3051 switch (fec) {
3052 case ICE_FEC_BASER:
3053
3054
3055
3056 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3057 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3058 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3059 ICE_AQC_PHY_FEC_25G_KR_REQ;
3060 break;
3061 case ICE_FEC_RS:
3062
3063
3064
3065 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3066 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3067 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3068 break;
3069 case ICE_FEC_NONE:
3070
3071 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3072 break;
3073 case ICE_FEC_AUTO:
3074
3075 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3076 cfg->link_fec_opt |= pcaps->link_fec_options;
3077 break;
3078 default:
3079 status = ICE_ERR_PARAM;
3080 break;
3081 }
3082
3083 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
3084 !ice_fw_supports_report_dflt_cfg(hw)) {
3085 struct ice_link_default_override_tlv tlv;
3086
3087 if (ice_get_link_default_override(&tlv, pi))
3088 goto out;
3089
3090 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3091 (tlv.options & ICE_LINK_OVERRIDE_EN))
3092 cfg->link_fec_opt = tlv.fec_options;
3093 }
3094
3095out:
3096 kfree(pcaps);
3097
3098 return status;
3099}
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3111{
3112 struct ice_phy_info *phy_info;
3113 enum ice_status status = 0;
3114
3115 if (!pi || !link_up)
3116 return ICE_ERR_PARAM;
3117
3118 phy_info = &pi->phy;
3119
3120 if (phy_info->get_link_info) {
3121 status = ice_update_link_info(pi);
3122
3123 if (status)
3124 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3125 status);
3126 }
3127
3128 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3129
3130 return status;
3131}
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141enum ice_status
3142ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3143 struct ice_sq_cd *cd)
3144{
3145 struct ice_aqc_restart_an *cmd;
3146 struct ice_aq_desc desc;
3147
3148 cmd = &desc.params.restart_an;
3149
3150 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3151
3152 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3153 cmd->lport_num = pi->lport;
3154 if (ena_link)
3155 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3156 else
3157 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3158
3159 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3160}
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171enum ice_status
3172ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3173 struct ice_sq_cd *cd)
3174{
3175 struct ice_aqc_set_event_mask *cmd;
3176 struct ice_aq_desc desc;
3177
3178 cmd = &desc.params.set_event_mask;
3179
3180 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3181
3182 cmd->lport_num = port_num;
3183
3184 cmd->event_mask = cpu_to_le16(mask);
3185 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3186}
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196enum ice_status
3197ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3198{
3199 struct ice_aqc_set_mac_lb *cmd;
3200 struct ice_aq_desc desc;
3201
3202 cmd = &desc.params.set_mac_lb;
3203
3204 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3205 if (ena_lpbk)
3206 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3207
3208 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3209}
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219enum ice_status
3220ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3221 struct ice_sq_cd *cd)
3222{
3223 struct ice_aqc_set_port_id_led *cmd;
3224 struct ice_hw *hw = pi->hw;
3225 struct ice_aq_desc desc;
3226
3227 cmd = &desc.params.set_port_id_led;
3228
3229 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3230
3231 if (is_orig_mode)
3232 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3233 else
3234 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3235
3236 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3237}
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
/**
 * ice_aq_sff_eeprom - read/write SFF EEPROM over I2C
 * @hw: pointer to the HW struct
 * @lport: logical port; bit 8 marks the lport number as valid
 * @bus_addr: 8-bit I2C bus address (shifted down to 7-bit form here)
 * @mem_addr: EEPROM offset; only the low byte is valid
 * @page: EEPROM page to access
 * @set_page: how/whether to set the page (ICE_AQC_SFF_SET_EEPROM_PAGE_*)
 * @data: caller-supplied buffer read from or written to the EEPROM
 * @length: number of bytes to transfer
 * @write: true for a write, false for a read
 * @cd: pointer to command details structure or NULL
 *
 * Issues the SFF EEPROM AQ command to access a module's EEPROM.
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	/* Offsets above 0xFF are rejected; only the low byte is sent */
	if (!data || (mem_addr & 0xff00))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	/* Pack the 7-bit I2C address together with the set_page policy */
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
3284
3285
3286
3287
3288
3289
3290
3291
3292
/**
 * __ice_aq_get_set_rss_lut - get/set RSS lookup table
 * @hw: pointer to the HW struct
 * @params: RSS LUT parameters (VSI handle, LUT type/size, buffer)
 * @set: true to set the table, false to get it
 *
 * Internal helper for the Get/Set RSS LUT AQ commands. Encodes the
 * VSI, table type, global LUT index, and table size into the command
 * flags. The size flag is only encoded for PF/Global set operations.
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		/* set carries a write buffer (the LUT contents) */
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		/* VSI LUTs have a fixed size - nothing more to encode */
		goto ice_aq_get_set_rss_lut_send;
	}

	/* Encode the LUT size flag; only done for PF/Global set requests */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		/* the 2K size is only valid for the PF table */
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		fallthrough;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}
3384
3385
3386
3387
3388
3389
3390
3391
/**
 * ice_aq_get_rss_lut - get the RSS lookup table for a VSI
 * @hw: pointer to the HW struct
 * @get_params: RSS LUT parameters used to specify the table to get
 *
 * Thin wrapper around __ice_aq_get_set_rss_lut() with set == false.
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}
3397
3398
3399
3400
3401
3402
3403
3404
/**
 * ice_aq_set_rss_lut - set the RSS lookup table for a VSI
 * @hw: pointer to the HW struct
 * @set_params: RSS LUT parameters used to specify the table to set
 *
 * Thin wrapper around __ice_aq_get_set_rss_lut() with set == true.
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420static enum
3421ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3422 struct ice_aqc_get_set_rss_keys *key,
3423 bool set)
3424{
3425 struct ice_aqc_get_set_rss_key *cmd_resp;
3426 u16 key_size = sizeof(*key);
3427 struct ice_aq_desc desc;
3428
3429 cmd_resp = &desc.params.get_set_rss_key;
3430
3431 if (set) {
3432 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3433 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
3434 } else {
3435 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3436 }
3437
3438 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
3439 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3440 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3441 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3442
3443 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3444}
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454enum ice_status
3455ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3456 struct ice_aqc_get_set_rss_keys *key)
3457{
3458 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3459 return ICE_ERR_PARAM;
3460
3461 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3462 key, false);
3463}
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473enum ice_status
3474ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3475 struct ice_aqc_get_set_rss_keys *keys)
3476{
3477 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3478 return ICE_ERR_PARAM;
3479
3480 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3481 keys, true);
3482}
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
/**
 * ice_aq_add_lan_txq - add LAN Tx queue(s) via AQ
 * @hw: pointer to the HW struct
 * @num_qgrps: number of queue groups in @qg_list
 * @qg_list: packed list of queue groups, each with a variable number
 *	     of txqs (flexible array member)
 * @buf_size: size of the @qg_list buffer in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Walks @qg_list to verify that the packed groups exactly account for
 * @buf_size, then issues the Add Tx LAN Queues AQ command.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	/* Each group's size depends on its own num_txqs; step over the
	 * flexible txqs array to reach the next packed group header.
	 */
	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	/* The caller's buffer must be exactly the packed size */
	if (buf_size != sum_size)
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
/**
 * ice_aq_dis_lan_txq - disable LAN Tx queue(s) via AQ
 * @hw: pointer to the HW struct
 * @num_qgrps: number of queue groups in @qg_list
 * @qg_list: packed list of queue groups to disable (may be NULL when
 *	     disabling due to a VM/VF reset)
 * @buf_size: size of the @qg_list buffer in bytes
 * @rst_src: reset source (VM/VF reset or no reset)
 * @vmvf_num: VM/VF number the reset applies to
 * @cd: pointer to command details structure or NULL
 *
 * Issues the Disable Tx LAN Queues AQ command. Either a queue list or
 * a reset source must be supplied.
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* The firmware expects the absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;

	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the
	 * driver and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* an even number of q_ids leaves the entry 2 bytes short
		 * of a 4-byte multiple; add padding to realign
		 * (assumes 4-byte entry alignment per AQ layout - TODO
		 * confirm against the AQ spec)
		 */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	/* The caller's buffer must be exactly the packed size */
	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646static void
3647ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3648{
3649 u8 src_byte, dest_byte, mask;
3650 u8 *from, *dest;
3651 u16 shift_width;
3652
3653
3654 from = src_ctx + ce_info->offset;
3655
3656
3657 shift_width = ce_info->lsb % 8;
3658 mask = (u8)(BIT(ce_info->width) - 1);
3659
3660 src_byte = *from;
3661 src_byte &= mask;
3662
3663
3664 mask <<= shift_width;
3665 src_byte <<= shift_width;
3666
3667
3668 dest = dest_ctx + (ce_info->lsb / 8);
3669
3670 memcpy(&dest_byte, dest, sizeof(dest_byte));
3671
3672 dest_byte &= ~mask;
3673 dest_byte |= src_byte;
3674
3675
3676 memcpy(dest, &dest_byte, sizeof(dest_byte));
3677}
3678
3679
3680
3681
3682
3683
3684
3685static void
3686ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3687{
3688 u16 src_word, mask;
3689 __le16 dest_word;
3690 u8 *from, *dest;
3691 u16 shift_width;
3692
3693
3694 from = src_ctx + ce_info->offset;
3695
3696
3697 shift_width = ce_info->lsb % 8;
3698 mask = BIT(ce_info->width) - 1;
3699
3700
3701
3702
3703 src_word = *(u16 *)from;
3704 src_word &= mask;
3705
3706
3707 mask <<= shift_width;
3708 src_word <<= shift_width;
3709
3710
3711 dest = dest_ctx + (ce_info->lsb / 8);
3712
3713 memcpy(&dest_word, dest, sizeof(dest_word));
3714
3715 dest_word &= ~(cpu_to_le16(mask));
3716 dest_word |= cpu_to_le16(src_word);
3717
3718
3719 memcpy(dest, &dest_word, sizeof(dest_word));
3720}
3721
3722
3723
3724
3725
3726
3727
/**
 * ice_write_dword - pack a dword-sized context field into a HW buffer
 * @src_ctx: source context buffer
 * @dest_ctx: destination (HW-layout) context buffer
 * @ce_info: field descriptor (offset, lsb, width)
 *
 * Masks the field out of the source dword, shifts it to its HW bit
 * position, and merges it little-endian into the destination without
 * disturbing neighboring bits.
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}
3772
3773
3774
3775
3776
3777
3778
/**
 * ice_write_qword - pack a qword-sized context field into a HW buffer
 * @src_ctx: source context buffer
 * @dest_ctx: destination (HW-layout) context buffer
 * @ce_info: field descriptor (offset, lsb, width)
 *
 * Masks the field out of the source qword, shifts it to its HW bit
 * position, and merges it little-endian into the destination without
 * disturbing neighboring bits.
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}
3823
3824
3825
3826
3827
3828
3829
3830
3831enum ice_status
3832ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
3833 const struct ice_ctx_ele *ce_info)
3834{
3835 int f;
3836
3837 for (f = 0; ce_info[f].width; f++) {
3838
3839
3840
3841
3842 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
3843 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
3844 f, ce_info[f].width, ce_info[f].size_of);
3845 continue;
3846 }
3847 switch (ce_info[f].size_of) {
3848 case sizeof(u8):
3849 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
3850 break;
3851 case sizeof(u16):
3852 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
3853 break;
3854 case sizeof(u32):
3855 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
3856 break;
3857 case sizeof(u64):
3858 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
3859 break;
3860 default:
3861 return ICE_ERR_INVAL_SIZE;
3862 }
3863 }
3864
3865 return 0;
3866}
3867
3868
3869
3870
3871
3872
3873
3874
3875struct ice_q_ctx *
3876ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
3877{
3878 struct ice_vsi_ctx *vsi;
3879 struct ice_q_ctx *q_ctx;
3880
3881 vsi = ice_get_vsi_ctx(hw, vsi_handle);
3882 if (!vsi)
3883 return NULL;
3884 if (q_handle >= vsi->num_lan_q_entries[tc])
3885 return NULL;
3886 if (!vsi->lan_q_ctx[tc])
3887 return NULL;
3888 q_ctx = vsi->lan_q_ctx[tc];
3889 return &q_ctx[q_handle];
3890}
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
/**
 * ice_ena_vsi_txq - enable a LAN Tx queue for a VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class number
 * @q_handle: software queue handle
 * @num_qgrps: number of added queue groups (only 1 is supported)
 * @buf: Add Tx queue group buffer (only 1 txq per group is supported)
 * @buf_size: size of @buf in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Adds the Tx queue to firmware under a free scheduler parent node and
 * registers the resulting leaf in the driver's scheduler tree, then
 * replays any cached bandwidth settings for the queue. Serialized by
 * the port's sched_lock.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* only a single queue group with a single queue is supported */
	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node with a free queue slot in the scheduler tree */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	/* Populate the scheduling element sections the firmware will
	 * validate: generic info plus default CIR/EIR rate-limit
	 * profiles and weights.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
/**
 * ice_dis_vsi_txq - disable LAN Tx queue(s) for a VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class number
 * @num_queues: number of queues to disable
 * @q_handles: software queue handles, one per queue
 * @q_ids: HW queue IDs, one per queue
 * @q_teids: scheduler node TEIDs, one per queue
 * @rst_src: reset source (VM/VF reset or no reset)
 * @vmvf_num: VM/VF number the reset applies to
 * @cd: pointer to command details structure or NULL
 *
 * Disables each queue via firmware (one AQ command per queue) and
 * frees the matching scheduler leaf node. With no queues, a reset
 * source must be given and a single reset-style disable is issued.
 * Serialized by the port's sched_lock.
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	/* one reusable single-queue entry, sent once per queue below */
	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		/* skip queues with no scheduler node or stale handles */
		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088static enum ice_status
4089ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
4090 u16 *maxqs, u8 owner)
4091{
4092 enum ice_status status = 0;
4093 u8 i;
4094
4095 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4096 return ICE_ERR_CFG;
4097
4098 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4099 return ICE_ERR_PARAM;
4100
4101 mutex_lock(&pi->sched_lock);
4102
4103 ice_for_each_traffic_class(i) {
4104
4105 if (!ice_sched_get_tc_node(pi, i))
4106 continue;
4107
4108 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4109 ice_is_tc_ena(tc_bitmap, i));
4110 if (status)
4111 break;
4112 }
4113
4114 mutex_unlock(&pi->sched_lock);
4115 return status;
4116}
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: bitmap of enabled traffic classes
 * @max_lanqs: per-TC maximum LAN queue counts
 *
 * Thin wrapper around ice_cfg_vsi_qs() for the LAN owner.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
4134
4135
4136
4137
4138
4139
4140
4141static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
4142{
4143 struct ice_switch_info *sw = hw->switch_info;
4144 u8 i;
4145
4146
4147 ice_rm_all_sw_replay_rule_info(hw);
4148
4149
4150
4151
4152 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
4153 list_replace_init(&sw->recp_list[i].filt_rules,
4154 &sw->recp_list[i].filt_replay_rules);
4155 ice_sched_replay_agg_vsi_preinit(hw);
4156
4157 return 0;
4158}
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4169{
4170 enum ice_status status;
4171
4172 if (!ice_is_vsi_valid(hw, vsi_handle))
4173 return ICE_ERR_PARAM;
4174
4175
4176 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
4177 status = ice_replay_pre_init(hw);
4178 if (status)
4179 return status;
4180 }
4181
4182 status = ice_replay_rss_cfg(hw, vsi_handle);
4183 if (status)
4184 return status;
4185
4186 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
4187 if (!status)
4188 status = ice_replay_vsi_agg(hw, vsi_handle);
4189 return status;
4190}
4191
4192
4193
4194
4195
4196
4197
/**
 * ice_replay_post - post-replay cleanup
 * @hw: pointer to the HW struct
 *
 * Removes the now-consumed switch replay rules and replays the
 * scheduler aggregator configuration.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old, stale replay rule information */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213void
4214ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4215 u64 *prev_stat, u64 *cur_stat)
4216{
4217 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4218
4219
4220
4221
4222
4223
4224 if (!prev_stat_loaded) {
4225 *prev_stat = new_data;
4226 return;
4227 }
4228
4229
4230
4231
4232 if (new_data >= *prev_stat)
4233 *cur_stat += new_data - *prev_stat;
4234 else
4235
4236 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4237
4238
4239 *prev_stat = new_data;
4240}
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250void
4251ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4252 u64 *prev_stat, u64 *cur_stat)
4253{
4254 u32 new_data;
4255
4256 new_data = rd32(hw, reg);
4257
4258
4259
4260
4261
4262
4263 if (!prev_stat_loaded) {
4264 *prev_stat = new_data;
4265 return;
4266 }
4267
4268
4269
4270
4271 if (new_data >= *prev_stat)
4272 *cur_stat += new_data - *prev_stat;
4273 else
4274
4275 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4276
4277
4278 *prev_stat = new_data;
4279}
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289enum ice_status
4290ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4291 struct ice_aqc_txsched_elem_data *buf)
4292{
4293 u16 buf_size, num_elem_ret = 0;
4294 enum ice_status status;
4295
4296 buf_size = sizeof(*buf);
4297 memset(buf, 0, buf_size);
4298 buf->node_teid = cpu_to_le32(node_teid);
4299 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4300 NULL);
4301 if (status || num_elem_ret != 1)
4302 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4303 return status;
4304}
4305
4306
4307
4308
4309
4310
4311
4312bool ice_fw_supports_link_override(struct ice_hw *hw)
4313{
4314 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
4315 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
4316 return true;
4317 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
4318 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
4319 return true;
4320 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
4321 return true;
4322 }
4323
4324 return false;
4325}
4326
4327
4328
4329
4330
4331
4332
4333
4334enum ice_status
4335ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
4336 struct ice_port_info *pi)
4337{
4338 u16 i, tlv, tlv_len, tlv_start, buf, offset;
4339 struct ice_hw *hw = pi->hw;
4340 enum ice_status status;
4341
4342 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
4343 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
4344 if (status) {
4345 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
4346 return status;
4347 }
4348
4349
4350 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
4351 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
4352
4353
4354 status = ice_read_sr_word(hw, tlv_start, &buf);
4355 if (status) {
4356 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4357 return status;
4358 }
4359 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
4360 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
4361 ICE_LINK_OVERRIDE_PHY_CFG_S;
4362
4363
4364 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
4365 status = ice_read_sr_word(hw, offset, &buf);
4366 if (status) {
4367 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
4368 return status;
4369 }
4370 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
4371
4372
4373 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
4374 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4375 status = ice_read_sr_word(hw, (offset + i), &buf);
4376 if (status) {
4377 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4378 return status;
4379 }
4380
4381 ldo->phy_type_low |= ((u64)buf << (i * 16));
4382 }
4383
4384
4385 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
4386 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
4387 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
4388 status = ice_read_sr_word(hw, (offset + i), &buf);
4389 if (status) {
4390 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
4391 return status;
4392 }
4393
4394 ldo->phy_type_high |= ((u64)buf << (i * 16));
4395 }
4396
4397 return status;
4398}
4399
4400
4401
4402
4403
4404bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
4405{
4406 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
4407 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
4408 ICE_AQC_PHY_AN_EN_CLAUSE73 |
4409 ICE_AQC_PHY_AN_EN_CLAUSE37))
4410 return true;
4411
4412 return false;
4413}
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425enum ice_status
4426ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
4427 struct ice_sq_cd *cd)
4428{
4429 struct ice_aqc_lldp_set_local_mib *cmd;
4430 struct ice_aq_desc desc;
4431
4432 cmd = &desc.params.lldp_set_mib;
4433
4434 if (buf_size == 0 || !buf)
4435 return ICE_ERR_PARAM;
4436
4437 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
4438
4439 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
4440 desc.datalen = cpu_to_le16(buf_size);
4441
4442 cmd->type = mib_type;
4443 cmd->length = cpu_to_le16(buf_size);
4444
4445 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4446}
4447
4448
4449
4450
4451
4452bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
4453{
4454 if (hw->mac_type != ICE_MAC_E810)
4455 return false;
4456
4457 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
4458 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
4459 return true;
4460 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
4461 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
4462 return true;
4463 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
4464 return true;
4465 }
4466 return false;
4467}
4468
4469
4470
4471
4472
4473
4474
4475enum ice_status
4476ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
4477{
4478 struct ice_aqc_lldp_filter_ctrl *cmd;
4479 struct ice_aq_desc desc;
4480
4481 cmd = &desc.params.lldp_filter_ctrl;
4482
4483 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
4484
4485 if (add)
4486 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
4487 else
4488 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
4489
4490 cmd->vsi_num = cpu_to_le16(vsi_num);
4491
4492 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4493}
4494
4495
4496
4497
4498
4499
4500
4501bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
4502{
4503 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4504 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
4505 return true;
4506 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
4507 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
4508 return true;
4509 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
4510 return true;
4511 }
4512 return false;
4513}
4514