// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#define ICE_PF_RESET_WAIT_COUNT	200

#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))

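/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID stored in the HW structure.
 */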
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	hw->mac_type = ICE_MAC_GENERIC;
	return 0;
}

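/**
 * ice_dev_onetime_setup - Temporary HW/FW workarounds
 * @hw: pointer to the HW structure
 *
 * This function provides temporary handlers for certain HW/FW
 * workarounds.
 */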
void ice_dev_onetime_setup(struct ice_hw *hw)
{
	/* configure Rx - set non pxe mode */
	wr32(hw, GLLAN_RCTL_0, 0x1);

#define MBX_PF_VT_PFALLOC	0x00231E80
	/* set VFs per PF */
	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}

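/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules,
 * port configuration, etc.).
 */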
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

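/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * On success, the LAN MAC address is also saved in the port_info MAC fields.
 */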
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

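/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */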
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP)
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);

	return status;
}

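/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */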
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
			return ICE_MEDIA_BACKPLANE;
		}
	}

	return ICE_MEDIA_UNKNOWN;
}

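/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */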
static enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return status;
}

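/**
 * ice_init_flex_flags
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize Rx flex flags
 */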
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
	 * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
	 * flexiflags1[3:0] - Not used for flag programming
	 * flexiflags2[7:0] - Tunnel and VLAN types
	 * 2 invalid fields in last index
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
				   ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_FIN, idx++);
		/* flex flag 1 is not used for flexi-flag programming, hence
		 * the DSI (don't care) values used here
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
				   ICE_RXFLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
				   ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
				   ICE_RXFLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
				   ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}

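/**
 * ice_init_flex_flds
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Function to initialize flex descriptors
 */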
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}

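/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the hardware structure
 */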
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	ice_init_def_sw_recp(hw);

	return 0;
}

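/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the hardware structure
 */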
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

#define ICE_FW_LOG_DESC_SIZE(n)	(sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)

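/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the hw struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PF's. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just set the "cfg" elements to 0's; the "cur" element always tracks the
 * currently configured value, so the original "cfg" values can be used to
 * re-enable logging later.
 */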
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

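/**
 * ice_output_fw_log
 * @hw: pointer to the hw struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */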
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n");
}

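/**
 * ice_get_itr_intrl_gran - determine int/intrl granularity
 * @hw: pointer to the hw struct
 *
 * Determines the itr/intrl granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */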
static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to determine itr/intrl granularity\n");
		return ICE_ERR_CFG;
	}

	return 0;
}

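/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */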
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	status = ice_get_itr_intrl_gran(hw);
	if (status)
		return status;

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to hw */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}

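/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 */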
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_shutdown_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

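/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */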
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

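/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */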
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

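/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */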
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

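/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */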
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to hw */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

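/* LAN Rx Queue Context */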
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};

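/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
 */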
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

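/* LAN Tx Queue Context */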
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};

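/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */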
void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
		  void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}

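/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the hw struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */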
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

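/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the hw struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */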
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

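/**
 * ice_aq_q_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */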
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

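/**
 * ice_aq_req_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 *  1) 0 -                  acquired lock, and can perform download package
 *  2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 *  3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                          successfully downloaded the package; the driver
 *                          does not have to download the package and can
 *                          continue loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and
 * issue a CORER. In this case, the calling thread will likely get an error
 * propagated back to it indicating that the Download Package, Update Package
 * or the Release Resource AQ commands timed out.
 */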
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

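/**
 * ice_aq_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */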
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

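/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource id
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */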
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

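/**
 * ice_release_res
 * @hw: pointer to the hw struct
 * @res: resource id
 *
 * This function will release a resource using the proper Admin Command.
 */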
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

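/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */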
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: SR-IOV = %d\n", caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VFs exposed = %d\n",
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VFs allocated = %d\n",
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: VF base_id = %d\n",
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.VSI cnt = %d\n",
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guaranteed_num_vsi = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Func.VSI cnt = %d\n",
					  func_p->guaranteed_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table size = %d\n",
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: RSS table width = %d\n",
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Rx Qs = %d\n", caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Rx first queue ID = %d\n",
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Num Tx Qs = %d\n", caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Tx first queue ID = %d\n",
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX vector count = %d\n",
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: MSIX first vector index = %d\n",
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			if (dev_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: Dev.MaxMTU = %d\n",
					  caps->max_mtu);
			else if (func_p)
				ice_debug(hw, ICE_DBG_INIT,
					  "HW caps: func.MaxMTU = %d\n",
					  caps->max_mtu);
			break;
		default:
			ice_debug(hw, ICE_DBG_INIT,
				  "HW caps: Unknown capability[%d]: 0x%x\n", i,
				  cap);
			break;
		}
	}
}

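/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the hw struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: size of the virtual buffer
 * @cap_count: cap count needed if AQ err==ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */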
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = le32_to_cpu(cmd->count);
	return status;
}

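/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */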
static enum ice_status ice_discover_caps(struct ice_hw *hw,
					 enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will
	 * return, so the buffer size required isn't known ahead of time.
	 * Start with a guessed count; if the buffer turns out to be too
	 * small, firmware returns ICE_AQ_RC_ENOMEM along with the needed
	 * count, and the command is retried with a right-sized buffer.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		devm_kfree(ice_hw_to_dev(hw), cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}

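/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */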
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}

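/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the hw struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */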
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* Prep values for flags, sah, sal */
	cmd->sah = htons(*((u16 *)mac_addr));
	cmd->sal = htonl(*((u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

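/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */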
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

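/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */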
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

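/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 *
 * This helper function will convert an entry in phy type structure
 * to its corresponding link speed. This function is only for the
 * lower part of the phy_type (phy_type_low).
 */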
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	return speed_phy_type_low;
}

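/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
 * link_speeds_bitmap including multiple speeds.
 *
 * Each value of phy_type_low represents a certain link speed. This helper
 * function will turn on bits in phy_type_low based on the value of
 * link_speeds_bitmap input parameter.
 */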
void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap)
{
	u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
	u64 pt_low;
	int index;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}
}

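/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the hw struct
 * @lport: logical port number
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */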
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	if (!cfg)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}

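/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */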
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	phy_info = &pi->phy;
	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		goto out;

	if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
					     pcaps, NULL);
		if (status)
			goto out;

		memcpy(phy_info->link_info.module_type, &pcaps->module_type,
		       sizeof(phy_info->link_info.module_type));
	}
out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

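/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */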
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current phy config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);
	/* set the new capabilities */
	cfg.caps |= pause_mask;
	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
		/* Copy over all the old settings */
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

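/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non zero. As a
 * result of this call, link status reporting becomes enabled
 */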
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

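/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */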
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

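/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */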
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

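/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */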
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}

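/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */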
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}

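/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */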
static enum
ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
				    struct ice_aqc_get_set_rss_keys *key,
				    bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

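/**
 * ice_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */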
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

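/**
 * ice_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */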
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

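/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE: The buffer size must exactly match the combined size of the queue
 * group headers plus all per-queue entries; this function validates that
 * before sending the command.
 */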
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

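/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */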
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF id */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

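/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */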
static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

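/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */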
static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
			   const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

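/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */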
static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

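/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx:  the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info:  a description of the struct to be filled
 */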
static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
			    const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

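/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx:  pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info:  a description of the structure to be transformed
 */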
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}

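/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: tc number
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one lan q
 */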
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
		struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the "generic" section of the queue context valid. Its default
	 * value is zero, which would otherwise tell FW that the driver has
	 * not provided values for that section; setting the valid bit makes
	 * FW apply the values supplied by the driver.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the lan q */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status)
		goto ena_txq_exit;

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}

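/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @num_queues: number of queues
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */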
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
		u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* if queue is disabled already yet the disable queue command has to be
	 * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
	 * any queue information
	 */
	if (!num_queues && rst_src)
		return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
					  NULL);

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

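/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: lan or rdma
 *
 * This function adds/updates the VSI queues per TC.
 */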
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

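/**
 * ice_cfg_vsi_lan - configure VSI lan queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max lan queues array per TC
 *
 * This function adds/updates the VSI lan queues per TC.
 */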
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

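/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the hw struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */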
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* In start of replay, move entries into replay_rules list, it
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);

	return 0;
}

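/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the hw struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */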
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}

	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	return status;
}

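/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the hw struct
 *
 * Function is called after replay is completed.
 */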
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
}

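/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @hireg: high 32 bit HW register to read from
 * @loreg: low 32 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */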
void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
		       bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data;

	new_data = rd32(hw, loreg);
	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them as
	 * offsets to be subtracted from the raw values in order to report stats
	 * that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
	*cur_stat &= 0xFFFFFFFFFFULL;
}

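/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */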
void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		       u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. So save the first values read and use them as
	 * offsets to be subtracted from the raw values in order to report stats
	 * that count from zero.
	 */
	if (!prev_stat_loaded)
		*prev_stat = new_data;
	if (new_data >= *prev_stat)
		*cur_stat = new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}