1
2
3
4#include "ice_common.h"
5#include "ice_sched.h"
6#include "ice_adminq_cmd.h"
7
8#define ICE_PF_RESET_WAIT_COUNT 200
9
/* Program one flexible Rx descriptor extraction word: packs the MDID opcode
 * and the protocol MDID for profile @rxdid into register
 * GLFLXP_RXDID_FLX_WRD_<idx>, masking each field to its register bounds.
 */
#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
	wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
	     ((ICE_RX_OPC_MDID << \
	       GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
	     (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
	      GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))

/* Program one group of four flexible descriptor flags (flg_0..flg_3) for
 * profile @rxdid into register GLFLXP_RXDID_FLAGS(rxdid, idx); each flag is
 * shifted into its FLEXIFLAG_4N* field and masked.
 */
#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
	wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
	     (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
	     (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
	     (((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
	     (((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
	      GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
28
29
30
31
32
33
34
35
36static enum ice_status ice_set_mac_type(struct ice_hw *hw)
37{
38 if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
39 return ICE_ERR_DEVICE_NOT_SUPPORTED;
40
41 hw->mac_type = ICE_MAC_GENERIC;
42 return 0;
43}
44
45
46
47
48
49
50
51
/**
 * ice_dev_onetime_setup - Temporary HW/FW workarounds
 * @hw: pointer to the HW structure
 *
 * Performs one-time device setup. Mirrors the PF_VT_PFALLOC_HIF value
 * into the mailbox PF/VT allocation register.
 * NOTE(review): MBX_PF_VT_PFALLOC is a raw register offset defined only
 * here — presumably a FW workaround to be removed once FW does this
 * itself; confirm against the device errata before changing.
 */
void ice_dev_onetime_setup(struct ice_hw *hw)
{
#define MBX_PF_VT_PFALLOC	0x00231E80
	/* set VFs per PF */
	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
}
58
59
60
61
62
63
64
65
66enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
67{
68 struct ice_aq_desc desc;
69
70 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
71
72 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
73}
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per-PF station MAC address (0x0107).
 * NOTE: upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer (interpreted as an array of
 * struct ice_aqc_manage_mac_read_resp), and the LAN address found there is
 * also cached into hw->port_info->mac.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
131
132
133
134
135
136
137
138
139
140
141
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600).
 * When topology capabilities are requested, the returned phy_type_low/high
 * are also cached into pi->phy.
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
	}

	return status;
}
172
173
174
175
176
/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 *
 * Maps the currently reported PHY type (pi->phy.link_info) to a media
 * type class (fiber, BaseT, DA copper, backplane). Returns
 * ICE_MEDIA_UNKNOWN when no PHY type or both low and high words are set.
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
252
253
254
255
256
257
258
259
260
261
/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter,
 * caches it into pi->phy (saving the previous status in link_info_old),
 * derives the media type and current flow-control mode, and clears the
 * get_link_info flag.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_link_status *hw_link_info_old, *hw_link_info;
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw_link_info_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	hw_link_info = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
				 cd);

	if (status)
		return status;

	/* save off old link status information */
	*hw_link_info_old = *hw_link_info;

	/* update current link status information */
	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	hw_link_info->link_info = link_data.link_info;
	hw_link_info->an_info = link_data.an_info;
	hw_link_info->ext_info = link_data.ext_info;
	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	hw_link_info->lse_ena =
		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	/* save link status information */
	if (link)
		*link = *hw_link_info;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
335
336
337
338
339
340
341
342
/**
 * ice_init_flex_flags - initialize Rx flexible-descriptor flag fields
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Programs the five GLFLXP_RXDID_FLAGS register groups (four flags per
 * group, 20 flag slots total) for the given profile. Unused slots are
 * filled with ICE_FLG_PKT_DSI. Only the Flex NIC profiles are supported;
 * other profile IDs just log a debug message.
 */
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	u8 idx = 0;

	/* Flag programming order below determines the flag's bit position
	 * in the descriptor; keep groups in register order.
	 */
	switch (prof_id) {
	/* Rx flex flags are currently programmed for the NIC profiles only.
	 * Different flag bit programming configurations can be added per
	 * profile as needed.
	 */
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
				   ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
				   ICE_FLG_FIN, idx++);
		/* flg_idx 4..7: remaining TCP flags; DSI fills the unused
		 * slots in this group.
		 */
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
				   ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
				   ICE_FLG_EVLAN_x9100, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
				   ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
				   ICE_FLG_TNL0, idx++);
		ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
				   ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Flag programming for profile ID %d not supported\n",
			  prof_id);
	}
}
384
385
386
387
388
389
390
391
/**
 * ice_init_flex_flds - initialize Rx flexible-descriptor MDID fields
 * @hw: pointer to the hardware structure
 * @prof_id: Rx Descriptor Builder profile ID
 *
 * Programs the four flexible extraction words for the given profile:
 * RSS hash (low/high), flow ID low, and either source VSI (FLEX_NIC_2)
 * or flow ID high, then chains into flag programming. Other profile IDs
 * just log a debug message.
 */
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
	enum ice_flex_rx_mdid mdid;

	switch (prof_id) {
	case ICE_RXDID_FLEX_NIC:
	case ICE_RXDID_FLEX_NIC_2:
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
		ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);

		/* flex word 3 differs between the two NIC profiles */
		mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
			ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;

		ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);

		ice_init_flex_flags(hw, prof_id);
		break;

	default:
		ice_debug(hw, ICE_DBG_INIT,
			  "Field init for profile ID %d not supported\n",
			  prof_id);
	}
}
417
418
419
420
421
422static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
423{
424 struct ice_switch_info *sw;
425
426 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
427 sizeof(*hw->switch_info), GFP_KERNEL);
428 sw = hw->switch_info;
429
430 if (!sw)
431 return ICE_ERR_NO_MEMORY;
432
433 INIT_LIST_HEAD(&sw->vsi_list_map_head);
434
435 return ice_init_def_sw_recp(hw);
436}
437
438
439
440
441
/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 *
 * Frees every VSI list map entry, destroys each recipe's filter-rule lock
 * and frees its filter rules, drops replay rule bookkeeping, then frees
 * the recipe list and the switch_info structure itself.
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
471
/* Size in bytes of an FW logging AQ buffer holding @n 16-bit entries; the
 * struct already contains one entry, hence the (n - 1).
 */
#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \
	(((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
/* Buffer size large enough for every FW logging module ID */
#define ICE_FW_LOG_DESC_SIZE_MAX	\
	ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
476
477
478
479
480
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 *
 * Queries the current FW logging configuration (0xFF32) and caches each
 * module's enabled-event flags into hw->fw_log.evnts[].cur.
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aqc_fw_logging_data *config;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 size;

	size = ICE_FW_LOG_DESC_SIZE_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * Enables/disables FW event logging over the admin queue and/or UART
 * (0xFF09), sending only the entries whose desired configuration (cfg)
 * differs from the current one (cur). On success the cached state in
 * hw->fw_log is updated to match what FW accepted. Returns 0 immediately
 * when no log destination is enabled, or when asked to disable logging
 * that is not active (or the send queue is already dead).
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging_data *data = NULL;
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			/* Lazily allocate the change buffer only once a
			 * difference is actually found (data is still NULL
			 * here, so no leak on the error return).
			 */
			if (!data) {
				data = devm_kzalloc(ice_hw_to_dev(hw),
						    ICE_FW_LOG_DESC_SIZE_MAX,
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data->entry[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = ICE_FW_LOG_DESC_SIZE(chgs);
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data->entry[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
673
674
675
676
677
678
679
680
681
/**
 * ice_output_fw_log - dump an FW log message to the debug log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor (datalen gives buffer length)
 * @buf: the FW log message payload
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
689
690
691
692
693
694
695
696
697static void ice_get_itr_intrl_gran(struct ice_hw *hw)
698{
699 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
700 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
701 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
702
703 switch (max_agg_bw) {
704 case ICE_MAX_AGG_BW_200G:
705 case ICE_MAX_AGG_BW_100G:
706 case ICE_MAX_AGG_BW_50G:
707 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
708 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
709 break;
710 case ICE_MAX_AGG_BW_25G:
711 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
712 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
713 break;
714 }
715}
716
717
718
719
720
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 *
 * Performs the full device bring-up sequence: MAC type, PF reset, control
 * queues, FW logging, PF config clear, NVM, capabilities, port info,
 * switch config, scheduler, PHY/link info, filter management, MAC address
 * read and flexible descriptor programming. On any failure, everything
 * initialized so far is unrolled via the labelled cleanup chain.
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_init_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	ice_dev_onetime_setup(hw);

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
	ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_shutdown_all_ctrlq(hw);
	return status;
}
861
862
863
864
865
866
867
868
869
/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * Teardown order matters: filter management and scheduler state are freed
 * while the control queues are still alive, FW logging is disabled (which
 * itself needs a live admin queue), and only then are the control queues
 * shut down and the VSI contexts cleared.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_shutdown_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
889
890
891
892
893
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 *
 * Polls GLGEN_RSTAT until the device leaves the reset state (within a
 * FW-advertised delay plus margin), then polls GLNVM_ULD until both the
 * CORER and GLOBR done bits are set. Returns ICE_ERR_RESET_FAILED on
 * either timeout.
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
				 GLNVM_ULD_GLOBR_DONE_M)

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
		if (reg == ICE_RESET_DONE_MASK) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
941
942
943
944
945
946
947
948
/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, register access may not be
 * functional yet; in that case only wait for the global reset to finish
 * instead of issuing a PF reset. Otherwise triggers PFGEN_CTRL.PFSWR and
 * polls until hardware clears the bit.
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1002{
1003 u32 val = 0;
1004
1005 switch (req) {
1006 case ICE_RESET_PFR:
1007 return ice_pf_reset(hw);
1008 case ICE_RESET_CORER:
1009 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1010 val = GLGEN_RTRIG_CORER_M;
1011 break;
1012 case ICE_RESET_GLOBR:
1013 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1014 val = GLGEN_RTRIG_GLOBR_M;
1015 break;
1016 default:
1017 return ICE_ERR_PARAM;
1018 }
1019
1020 val |= rd32(hw, GLGEN_RTRIG);
1021 wr32(hw, GLGEN_RTRIG, val);
1022 ice_flush(hw);
1023
1024
1025 return ice_check_reset(hw);
1026}
1027
1028
1029
1030
1031
1032
1033
1034
1035
/**
 * ice_copy_rxq_ctx_to_hw - Copy rxq context register array to HW
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the packed rxq context (dword-aligned buffer)
 * @rxq_index: the index of the Rx queue
 *
 * Copies the packed rxq context, one dword at a time, into the
 * QRX_CONTEXT registers for the given queue.
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}
1058
1059
/* LAN Rx Queue Context: bit layout used by ice_set_ctx() to pack a
 * struct ice_rlan_ctx into the hardware's register image.
 */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	{ 0 }
};
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093enum ice_status
1094ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1095 u32 rxq_index)
1096{
1097 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1098
1099 ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1100 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1101}
1102
1103
/* LAN Tx Queue Context: bit layout used by ice_set_ctx() to pack a
 * struct ice_tlan_ctx into the hardware's register image.
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		110,	171),
	{ 0 }
};
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
/**
 * ice_debug_cq - dump control queue descriptor (and optional buffer)
 * @hw: pointer to the hardware structure
 * @mask: debug mask
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer, or NULL
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents. The
 * buffer dump is capped at min(buf_len, descriptor datalen).
 */
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
	     u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

#ifndef CONFIG_DYNAMIC_DEBUG
	/* without dynamic debug, filter on the driver's debug mask */
	if (!(mask & hw->debug_mask))
		return;
#endif

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, mask,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, mask, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
	}
}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Thin wrapper routing the command to the admin send queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}
1204
1205
1206
1207
1208
1209
1210
1211
/**
 * ice_aq_get_fw_ver - get the firmware version
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands and,
 * on success, cache the FW and AQ API version fields into the HW struct.
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1248{
1249 struct ice_aqc_q_shutdown *cmd;
1250 struct ice_aq_desc desc;
1251
1252 cmd = &desc.params.q_shutdown;
1253
1254 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1255
1256 if (unloading)
1257 cmd->driver_unloading = cpu_to_le32(ICE_AQC_DRIVER_UNLOADING);
1258
1259 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1260}
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
/**
 * ice_aq_req_res - request a common resource from the FW
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008). On
 * return, *timeout holds how long the driver may hold the resource (or 0
 * when the request was denied).
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 *
	 * Global config lock response utilizes an additional status field:
	 *
	 * If the Global config lock resource is held by some other driver,
	 * the command completes with ICE_AQ_RES_GLBL_IN_PROG in the status
	 * field and the timeout field indicates the maximum time the current
	 * owner of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357static enum ice_status
1358ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1359 struct ice_sq_cd *cd)
1360{
1361 struct ice_aqc_req_res *cmd;
1362 struct ice_aq_desc desc;
1363
1364 cmd = &desc.params.res_owner;
1365
1366 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1367
1368 cmd->res_id = cpu_to_le16(res);
1369 cmd->res_number = cpu_to_le32(sdp_number);
1370
1371 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1372}
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
/**
 * ice_acquire_res - acquire the ownership of a resource
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource,
 * retrying (with ICE_RES_POLLING_DELAY_MS between attempts) for as long
 * as the FW-reported hold time permits. ICE_ERR_AQ_NO_WORK means the
 * work the caller intended is already done (e.g. global config already
 * downloaded) and is propagated without retrying.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}
1435
1436
1437
1438
1439
1440
1441
1442
1443void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1444{
1445 enum ice_status status;
1446 u32 total_delay = 0;
1447
1448 status = ice_aq_release_res(hw, res, 0, NULL);
1449
1450
1451
1452
1453 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1454 (total_delay < hw->adminq.sq_cmd_timeout)) {
1455 mdelay(1);
1456 status = ice_aq_release_res(hw, res, 0, NULL);
1457 total_delay++;
1458 }
1459}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1471{
1472 u8 funcs;
1473
1474#define ICE_CAPS_VALID_FUNCS_M 0xFF
1475 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
1476 ICE_CAPS_VALID_FUNCS_M);
1477
1478 if (!funcs)
1479 return 0;
1480
1481 return max / funcs;
1482}
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
/**
 * ice_parse_caps - parse device/function capability list from firmware
 * @hw: pointer to the HW struct
 * @buf: buffer containing an array of struct ice_aqc_list_caps_elem
 * @cap_count: number of capability elements in @buf
 * @opc: selects whether @buf holds device or function capabilities
 *
 * Walks the capability array returned by the list-caps admin queue command
 * and stores each recognized capability into hw->dev_caps or hw->func_caps,
 * depending on @opc. Unknown capability IDs are only logged.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	/* Select the destination capability structure based on the opcode
	 * used to retrieve the list; anything else is a caller error.
	 */
	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		/* fields are little-endian on the wire */
		u32 logical_id = le32_to_cpu(cap_resp->logical_id);
		u32 phys_id = le32_to_cpu(cap_resp->phys_id);
		u32 number = le32_to_cpu(cap_resp->number);
		u16 cap = le16_to_cpu(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid functions = %d\n", prefix,
				  caps->valid_functions);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: SR-IOV = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			/* VF info lands in different fields depending on
			 * whether this is the device or function list
			 */
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: VFs exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: VFs allocated = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: VF base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num VSI alloc to host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				/* per-function guarantee is derived locally,
				 * not taken from the firmware value
				 */
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num guaranteed VSI (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num guaranteed VSI = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: RSS table size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: RSS table width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num Rx queues = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: Rx first queue ID = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num Tx queues = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: Tx first queue ID = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: MSIX vector count = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: MSIX first vector index = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_MAX_MTU:
			caps->max_mtu = number;
			ice_debug(hw, ICE_DBG_INIT, "%s: max MTU = %d\n",
				  prefix, caps->max_mtu);
			break;
		default:
			/* unknown capabilities are not an error */
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: unknown capability[%d]: 0x%x\n", prefix,
				  i, cap);
			break;
		}
	}
}
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642static enum ice_status
1643ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
1644 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1645{
1646 struct ice_aqc_list_caps *cmd;
1647 struct ice_aq_desc desc;
1648 enum ice_status status;
1649
1650 cmd = &desc.params.get_cap;
1651
1652 if (opc != ice_aqc_opc_list_func_caps &&
1653 opc != ice_aqc_opc_list_dev_caps)
1654 return ICE_ERR_PARAM;
1655
1656 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1657
1658 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1659 if (!status)
1660 ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
1661 else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
1662 *cap_count = le32_to_cpu(cmd->count);
1663 return status;
1664}
1665
1666
1667
1668
1669
1670
1671static enum ice_status
1672ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
1673{
1674 enum ice_status status;
1675 u32 cap_count;
1676 u16 cbuf_len;
1677 u8 retries;
1678
1679
1680
1681
1682
1683
1684
1685
1686#define ICE_GET_CAP_BUF_COUNT 40
1687#define ICE_GET_CAP_RETRY_COUNT 2
1688
1689 cap_count = ICE_GET_CAP_BUF_COUNT;
1690 retries = ICE_GET_CAP_RETRY_COUNT;
1691
1692 do {
1693 void *cbuf;
1694
1695 cbuf_len = (u16)(cap_count *
1696 sizeof(struct ice_aqc_list_caps_elem));
1697 cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
1698 if (!cbuf)
1699 return ICE_ERR_NO_MEMORY;
1700
1701 status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
1702 opc, NULL);
1703 devm_kfree(ice_hw_to_dev(hw), cbuf);
1704
1705 if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
1706 break;
1707
1708
1709 } while (--retries);
1710
1711 return status;
1712}
1713
1714
1715
1716
1717
1718enum ice_status ice_get_caps(struct ice_hw *hw)
1719{
1720 enum ice_status status;
1721
1722 status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
1723 if (!status)
1724 status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
1725
1726 return status;
1727}
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
/**
 * ice_aq_manage_mac_write - issue the Manage MAC Write admin queue command
 * @hw: pointer to the HW struct
 * @mac_addr: 6-byte MAC address to write
 * @flags: flags to control the write behavior
 * @cd: pointer to command details structure or NULL
 *
 * Writes a MAC address to the device (0x0108). The address is split into
 * a 2-byte high part (sah) and 4-byte low part (sal) of the descriptor.
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;

	/* NOTE(review): these casts read a u16/u32 directly from mac_addr,
	 * which may be unaligned on some architectures, and the resulting
	 * byte layout of sah/sal differs between little- and big-endian
	 * hosts — confirm the intended wire format and consider
	 * get_unaligned_be16/be32 helpers instead.
	 */
	cmd->sah = htons(*((const u16 *)mac_addr));
	cmd->sal = htonl(*((const u32 *)(mac_addr + 2)));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
1756
1757
1758
1759
1760
1761
1762
1763static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
1764{
1765 struct ice_aq_desc desc;
1766
1767 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
1768 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
1769
1770 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1771}
1772
1773
1774
1775
1776
1777
1778
1779
1780void ice_clear_pxe_mode(struct ice_hw *hw)
1781{
1782 if (ice_check_sq_alive(hw, &hw->adminq))
1783 ice_aq_clear_pxe_mode(hw);
1784}
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
/**
 * ice_get_link_speed_based_on_phy_type - map a PHY type to a link speed
 * @phy_type_low: single-bit PHY type from the low 64-bit word, or 0
 * @phy_type_high: single-bit PHY type from the high 64-bit word, or 0
 *
 * Returns the ICE_AQ_LINK_SPEED_* value for the given PHY type. Exactly
 * one of the two arguments is expected to carry a single PHY type bit;
 * if both map to a known speed, or neither does, the result is
 * ICE_AQ_LINK_SPEED_UNKNOWN.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	/* low word: 100 Mb through 100 Gb PHY types */
	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* high word: 100 Gb PHY types only */
	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	/* unknown if neither word resolved, or if both did (ambiguous) */
	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934void
1935ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
1936 u16 link_speeds_bitmap)
1937{
1938 u64 pt_high;
1939 u64 pt_low;
1940 int index;
1941 u16 speed;
1942
1943
1944 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
1945 pt_low = BIT_ULL(index);
1946 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
1947
1948 if (link_speeds_bitmap & speed)
1949 *phy_type_low |= BIT_ULL(index);
1950 }
1951
1952
1953 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
1954 pt_high = BIT_ULL(index);
1955 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
1956
1957 if (link_speeds_bitmap & speed)
1958 *phy_type_high |= BIT_ULL(index);
1959 }
1960}
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974enum ice_status
1975ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
1976 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
1977{
1978 struct ice_aq_desc desc;
1979
1980 if (!cfg)
1981 return ICE_ERR_PARAM;
1982
1983
1984 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
1985 ice_debug(hw, ICE_DBG_PHY,
1986 "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
1987 cfg->caps);
1988
1989 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
1990 }
1991
1992 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
1993 desc.params.set_phy.lport_num = lport;
1994 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
1995
1996 return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
1997}
1998
1999
2000
2001
2002
2003enum ice_status ice_update_link_info(struct ice_port_info *pi)
2004{
2005 struct ice_link_status *li;
2006 enum ice_status status;
2007
2008 if (!pi)
2009 return ICE_ERR_PARAM;
2010
2011 li = &pi->phy.link_info;
2012
2013 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2014 if (status)
2015 return status;
2016
2017 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2018 struct ice_aqc_get_phy_caps_data *pcaps;
2019 struct ice_hw *hw;
2020
2021 hw = pi->hw;
2022 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
2023 GFP_KERNEL);
2024 if (!pcaps)
2025 return ICE_ERR_NO_MEMORY;
2026
2027 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
2028 pcaps, NULL);
2029 if (!status)
2030 memcpy(li->module_type, &pcaps->module_type,
2031 sizeof(li->module_type));
2032
2033 devm_kfree(ice_hw_to_dev(hw), pcaps);
2034 }
2035
2036 return status;
2037}
2038
2039
2040
2041
2042
2043
2044
2045
2046
/**
 * ice_set_fc - configure flow control for a port
 * @pi: port information structure
 * @aq_failures: output flag set to the stage that failed, if any
 * @ena_auto_link_update: enable automatic link update (restart link so the
 *                        new settings take effect)
 *
 * Translates pi->fc.req_mode into PHY pause capability bits, reads the
 * current PHY configuration, and writes it back with the new pause bits
 * only when something actually changed. After a successful write, polls
 * ice_update_link_info() so callers see refreshed link state.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	/* map requested flow-control mode onto PHY pause capability bits */
	switch (pi->fc.req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* read the currently applied (SW) PHY configuration */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* clear the old pause settings */
	cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
				   ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new pause capabilities */
	cfg.caps |= pause_mask;

	/* only issue the (expensive) set command when something changed */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* auto-restart link so the settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		/* carry over all remaining settings unchanged */
		cfg.phy_type_high = pcaps->phy_type_high;
		cfg.phy_type_low = pcaps->phy_type_low;
		cfg.low_power_ctrl = pcaps->low_power_ctrl;
		cfg.eee_cap = pcaps->eee_cap;
		cfg.eeer_value = pcaps->eeer_value;
		cfg.link_fec_opt = pcaps->link_fec_options;

		status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Link can take a long time to come back after the PHY
		 * reconfiguration, so poll the link info with a delay
		 * between attempts.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147void
2148ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
2149 struct ice_aqc_set_phy_cfg_data *cfg)
2150{
2151 if (!caps || !cfg)
2152 return;
2153
2154 cfg->phy_type_low = caps->phy_type_low;
2155 cfg->phy_type_high = caps->phy_type_high;
2156 cfg->caps = caps->caps;
2157 cfg->low_power_ctrl = caps->low_power_ctrl;
2158 cfg->eee_cap = caps->eee_cap;
2159 cfg->eeer_value = caps->eeer_value;
2160 cfg->link_fec_opt = caps->link_fec_options;
2161}
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172void
2173ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
2174{
2175 switch (fec) {
2176 case ICE_FEC_BASER:
2177
2178
2179
2180 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2181 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2182 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
2183 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2184 ICE_AQC_PHY_FEC_25G_KR_REQ;
2185 break;
2186 case ICE_FEC_RS:
2187
2188
2189
2190 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2191 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
2192 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2193 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
2194 break;
2195 case ICE_FEC_NONE:
2196
2197 cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
2198 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
2199 break;
2200 case ICE_FEC_AUTO:
2201
2202 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
2203 break;
2204 }
2205}
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
2217{
2218 struct ice_phy_info *phy_info;
2219 enum ice_status status = 0;
2220
2221 if (!pi || !link_up)
2222 return ICE_ERR_PARAM;
2223
2224 phy_info = &pi->phy;
2225
2226 if (phy_info->get_link_info) {
2227 status = ice_update_link_info(pi);
2228
2229 if (status)
2230 ice_debug(pi->hw, ICE_DBG_LINK,
2231 "get link status error, status = %d\n",
2232 status);
2233 }
2234
2235 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
2236
2237 return status;
2238}
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248enum ice_status
2249ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
2250 struct ice_sq_cd *cd)
2251{
2252 struct ice_aqc_restart_an *cmd;
2253 struct ice_aq_desc desc;
2254
2255 cmd = &desc.params.restart_an;
2256
2257 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
2258
2259 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
2260 cmd->lport_num = pi->lport;
2261 if (ena_link)
2262 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
2263 else
2264 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
2265
2266 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
2267}
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278enum ice_status
2279ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
2280 struct ice_sq_cd *cd)
2281{
2282 struct ice_aqc_set_event_mask *cmd;
2283 struct ice_aq_desc desc;
2284
2285 cmd = &desc.params.set_event_mask;
2286
2287 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
2288
2289 cmd->lport_num = port_num;
2290
2291 cmd->event_mask = cpu_to_le16(mask);
2292 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2293}
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303enum ice_status
2304ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
2305{
2306 struct ice_aqc_set_mac_lb *cmd;
2307 struct ice_aq_desc desc;
2308
2309 cmd = &desc.params.set_mac_lb;
2310
2311 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
2312 if (ena_lpbk)
2313 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
2314
2315 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2316}
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326enum ice_status
2327ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
2328 struct ice_sq_cd *cd)
2329{
2330 struct ice_aqc_set_port_id_led *cmd;
2331 struct ice_hw *hw = pi->hw;
2332 struct ice_aq_desc desc;
2333
2334 cmd = &desc.params.set_port_id_led;
2335
2336 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
2337
2338 if (is_orig_mode)
2339 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
2340 else
2341 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
2342
2343 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2344}
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
/**
 * __ice_aq_get_set_rss_lut - get or set an RSS lookup table
 * @hw: pointer to the HW struct
 * @vsi_id: hardware VSI number
 * @lut_type: LUT table type (VSI, PF, or GLOBAL)
 * @lut: buffer holding (set) or receiving (get) the LUT contents
 * @lut_size: size of @lut in bytes; must match a supported table size
 * @glob_lut_idx: global LUT index, used only for the GLOBAL table type
 * @set: true to set the LUT, false to get it
 *
 * Builds and sends the get/set RSS LUT admin queue command. For "get"
 * requests on PF and GLOBAL tables, and for all VSI-table requests, the
 * size validation is skipped and the command is sent directly.
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		/* RD flag: the buffer carries data TO firmware */
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	/* Only "set" operations on PF/GLOBAL tables need the size encoded;
	 * everything else goes straight to the send.
	 */
	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* encode the LUT size; 128 entries is the zero encoding */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		/* 2K entries are only valid for the PF table type */
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall through - 2K on a non-PF table is invalid */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = cpu_to_le16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446enum ice_status
2447ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2448 u8 *lut, u16 lut_size)
2449{
2450 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2451 return ICE_ERR_PARAM;
2452
2453 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2454 lut_type, lut, lut_size, 0, false);
2455}
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467enum ice_status
2468ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
2469 u8 *lut, u16 lut_size)
2470{
2471 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
2472 return ICE_ERR_PARAM;
2473
2474 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2475 lut_type, lut, lut_size, 0, true);
2476}
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487static enum
2488ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
2489 struct ice_aqc_get_set_rss_keys *key,
2490 bool set)
2491{
2492 struct ice_aqc_get_set_rss_key *cmd_resp;
2493 u16 key_size = sizeof(*key);
2494 struct ice_aq_desc desc;
2495
2496 cmd_resp = &desc.params.get_set_rss_key;
2497
2498 if (set) {
2499 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
2500 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
2501 } else {
2502 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
2503 }
2504
2505 cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
2506 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
2507 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
2508 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
2509
2510 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
2511}
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521enum ice_status
2522ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
2523 struct ice_aqc_get_set_rss_keys *key)
2524{
2525 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
2526 return ICE_ERR_PARAM;
2527
2528 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2529 key, false);
2530}
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540enum ice_status
2541ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
2542 struct ice_aqc_get_set_rss_keys *keys)
2543{
2544 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
2545 return ICE_ERR_PARAM;
2546
2547 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
2548 keys, true);
2549}
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
/**
 * ice_aq_add_lan_txq - add LAN Tx queue(s) via admin queue
 * @hw: pointer to the HW struct
 * @num_qgrps: number of queue groups in @qg_list
 * @qg_list: variable-length list of queue groups to add
 * @buf_size: size of @qg_list in bytes
 * @cd: pointer to command details structure or NULL
 *
 * Validates that @buf_size exactly matches the space consumed by
 * @num_qgrps group headers plus each group's per-queue entries, then
 * issues the Add Tx Queues command with @qg_list as the indirect buffer.
 */
static enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	/* size of all group headers, excluding the per-queue array space */
	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	/* walk the variable-length list: each group is a header followed by
	 * num_txqs per-queue entries; the next group starts right after them
	 */
	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	/* the caller-provided buffer must match the computed layout exactly */
	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	/* RD flag: the buffer carries data TO firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
/**
 * ice_aq_dis_lan_txq - disable LAN Tx queue(s) via admin queue
 * @hw: pointer to the HW struct
 * @num_qgrps: number of queue groups in @qg_list (may be 0 on reset)
 * @qg_list: variable-length list of queue groups to disable, or NULL
 * @buf_size: size of @qg_list in bytes
 * @rst_src: source of the reset (VM reset, VF reset, or none)
 * @vmvf_num: VM/VF number, used when @rst_src selects a reset
 * @cd: pointer to command details structure or NULL
 *
 * Issues the Disable Tx Queues command. When called as part of a VM/VF
 * reset, @qg_list may be NULL and only the reset fields are sent;
 * otherwise the buffer size is validated against the list layout first.
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* a NULL list is only meaningful when a reset source is given */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* the VF number is offset by the function's VF base ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* always flush the pipe when disabling queues */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;

	if (!qg_list)
		goto do_aq;

	/* RD flag: the buffer carries data TO firmware */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	/* validate the buffer size against the variable-length layout:
	 * per group, the queue IDs plus the group header, with 2 bytes of
	 * padding when the queue count is even (layout-defined alignment)
	 */
	for (i = 0; i < num_qgrps; ++i) {
		/* queue ID array for this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* group header (struct minus its embedded first q_id slot) */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* NOTE(review): padding applied when num_qs is even —
		 * presumably to keep each entry aligned; confirm against the
		 * admin queue buffer layout specification.
		 */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717static void
2718ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2719{
2720 u8 src_byte, dest_byte, mask;
2721 u8 *from, *dest;
2722 u16 shift_width;
2723
2724
2725 from = src_ctx + ce_info->offset;
2726
2727
2728 shift_width = ce_info->lsb % 8;
2729 mask = (u8)(BIT(ce_info->width) - 1);
2730
2731 src_byte = *from;
2732 src_byte &= mask;
2733
2734
2735 mask <<= shift_width;
2736 src_byte <<= shift_width;
2737
2738
2739 dest = dest_ctx + (ce_info->lsb / 8);
2740
2741 memcpy(&dest_byte, dest, sizeof(dest_byte));
2742
2743 dest_byte &= ~mask;
2744 dest_byte |= src_byte;
2745
2746
2747 memcpy(dest, &dest_byte, sizeof(dest_byte));
2748}
2749
2750
2751
2752
2753
2754
2755
2756static void
2757ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2758{
2759 u16 src_word, mask;
2760 __le16 dest_word;
2761 u8 *from, *dest;
2762 u16 shift_width;
2763
2764
2765 from = src_ctx + ce_info->offset;
2766
2767
2768 shift_width = ce_info->lsb % 8;
2769 mask = BIT(ce_info->width) - 1;
2770
2771
2772
2773
2774 src_word = *(u16 *)from;
2775 src_word &= mask;
2776
2777
2778 mask <<= shift_width;
2779 src_word <<= shift_width;
2780
2781
2782 dest = dest_ctx + (ce_info->lsb / 8);
2783
2784 memcpy(&dest_word, dest, sizeof(dest_word));
2785
2786 dest_word &= ~(cpu_to_le16(mask));
2787 dest_word |= cpu_to_le16(src_word);
2788
2789
2790 memcpy(dest, &dest_word, sizeof(dest_word));
2791}
2792
2793
2794
2795
2796
2797
2798
2799static void
2800ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2801{
2802 u32 src_dword, mask;
2803 __le32 dest_dword;
2804 u8 *from, *dest;
2805 u16 shift_width;
2806
2807
2808 from = src_ctx + ce_info->offset;
2809
2810
2811 shift_width = ce_info->lsb % 8;
2812
2813
2814
2815
2816
2817 if (ce_info->width < 32)
2818 mask = BIT(ce_info->width) - 1;
2819 else
2820 mask = (u32)~0;
2821
2822
2823
2824
2825 src_dword = *(u32 *)from;
2826 src_dword &= mask;
2827
2828
2829 mask <<= shift_width;
2830 src_dword <<= shift_width;
2831
2832
2833 dest = dest_ctx + (ce_info->lsb / 8);
2834
2835 memcpy(&dest_dword, dest, sizeof(dest_dword));
2836
2837 dest_dword &= ~(cpu_to_le32(mask));
2838 dest_dword |= cpu_to_le32(src_dword);
2839
2840
2841 memcpy(dest, &dest_dword, sizeof(dest_dword));
2842}
2843
2844
2845
2846
2847
2848
2849
2850static void
2851ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2852{
2853 u64 src_qword, mask;
2854 __le64 dest_qword;
2855 u8 *from, *dest;
2856 u16 shift_width;
2857
2858
2859 from = src_ctx + ce_info->offset;
2860
2861
2862 shift_width = ce_info->lsb % 8;
2863
2864
2865
2866
2867
2868 if (ce_info->width < 64)
2869 mask = BIT_ULL(ce_info->width) - 1;
2870 else
2871 mask = (u64)~0;
2872
2873
2874
2875
2876 src_qword = *(u64 *)from;
2877 src_qword &= mask;
2878
2879
2880 mask <<= shift_width;
2881 src_qword <<= shift_width;
2882
2883
2884 dest = dest_ctx + (ce_info->lsb / 8);
2885
2886 memcpy(&dest_qword, dest, sizeof(dest_qword));
2887
2888 dest_qword &= ~(cpu_to_le64(mask));
2889 dest_qword |= cpu_to_le64(src_qword);
2890
2891
2892 memcpy(dest, &dest_qword, sizeof(dest_qword));
2893}
2894
2895
2896
2897
2898
2899
2900
2901enum ice_status
2902ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
2903{
2904 int f;
2905
2906 for (f = 0; ce_info[f].width; f++) {
2907
2908
2909
2910
2911 switch (ce_info[f].size_of) {
2912 case sizeof(u8):
2913 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
2914 break;
2915 case sizeof(u16):
2916 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
2917 break;
2918 case sizeof(u32):
2919 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
2920 break;
2921 case sizeof(u64):
2922 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
2923 break;
2924 default:
2925 return ICE_ERR_INVAL_SIZE;
2926 }
2927 }
2928
2929 return 0;
2930}
2931
2932
2933
2934
2935
2936
2937
2938
2939static struct ice_q_ctx *
2940ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
2941{
2942 struct ice_vsi_ctx *vsi;
2943 struct ice_q_ctx *q_ctx;
2944
2945 vsi = ice_get_vsi_ctx(hw, vsi_handle);
2946 if (!vsi)
2947 return NULL;
2948 if (q_handle >= vsi->num_lan_q_entries[tc])
2949 return NULL;
2950 if (!vsi->lan_q_ctx[tc])
2951 return NULL;
2952 q_ctx = vsi->lan_q_ctx[tc];
2953 return &q_ctx[q_handle];
2954}
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
/**
 * ice_ena_vsi_txq - enable a LAN Tx queue and add it to the scheduler tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class number
 * @q_handle: software queue handle
 * @num_qgrps: number of added queue groups (only 1 is supported here)
 * @buf: list of queue groups to be added; txqs[0] is filled in and sent
 * @buf_size: size of @buf in bytes
 * @cd: pointer to command details structure, or NULL
 *
 * Sends the add-LAN-Tx-queue admin queue command for a single queue and,
 * on success, inserts a matching leaf node into the scheduler tree and
 * records @q_handle in the queue context. Returns 0 on success or an
 * ice_status error code. Takes pi->sched_lock around all scheduler state.
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	/* this implementation only handles a single queue in a single group */
	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* the same parent TEID goes into both the AQ buffer and the local
	 * scheduler node that will be added after the command succeeds
	 */
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	/* Mark the values in the "generic" section as valid; the section
	 * itself is left zeroed, which selects the hardware defaults.
	 * NOTE(review): presumably the AQ command rejects the request if
	 * this valid bit is not set — confirm against the AQ spec.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue via the admin queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	/* firmware returned the queue's TEID in the buffer; mirror it into
	 * the software scheduler tree as a leaf node
	 */
	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
/**
 * ice_dis_vsi_txq - disable LAN Tx queues and remove their scheduler nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class number
 * @num_queues: number of queues to disable
 * @q_handles: array of @num_queues software queue handles
 * @q_ids: array of @num_queues hardware queue IDs
 * @q_teids: array of @num_queues queue node TEIDs
 * @rst_src: reset source (or 0 when not called as part of a reset)
 * @vmvf_num: VM/VF number, used when @rst_src is set
 * @cd: pointer to command details structure, or NULL
 *
 * Sends a disable-LAN-Tx-queue admin queue command per queue, frees the
 * matching scheduler node and invalidates the stored queue handle. Queues
 * whose node or context cannot be found are skipped; the first AQ failure
 * stops the loop. Returns the last ice_status from the AQ (initialized to
 * ICE_ERR_DOES_NOT_EXIST, so that value is returned when no queue was
 * processed). Takes pi->sched_lock around the loop.
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	struct ice_q_ctx *q_ctx;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!num_queues) {
		/* if the queues are already disabled but the disable command
		 * still has to be sent to complete a VF/VM reset, issue
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		/* skip queues that have no scheduler node for this TEID */
		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
				  q_handles[i]);
			continue;
		}
		/* a stale/mismatched handle means this context doesn't own
		 * the queue anymore; skip it rather than disable it
		 */
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		/* one queue per command: parent TEID plus a single queue ID */
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status)
			break;
		/* only tear down software state once the AQ has succeeded */
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130static enum ice_status
3131ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3132 u16 *maxqs, u8 owner)
3133{
3134 enum ice_status status = 0;
3135 u8 i;
3136
3137 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
3138 return ICE_ERR_CFG;
3139
3140 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3141 return ICE_ERR_PARAM;
3142
3143 mutex_lock(&pi->sched_lock);
3144
3145 ice_for_each_traffic_class(i) {
3146
3147 if (!ice_sched_get_tc_node(pi, i))
3148 continue;
3149
3150 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
3151 ice_is_tc_ena(tc_bitmap, i));
3152 if (status)
3153 break;
3154 }
3155
3156 mutex_unlock(&pi->sched_lock);
3157 return status;
3158}
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169enum ice_status
3170ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
3171 u16 *max_lanqs)
3172{
3173 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
3174 ICE_SCHED_NODE_OWNER_LAN);
3175}
3176
3177
3178
3179
3180
3181
3182
3183static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
3184{
3185 struct ice_switch_info *sw = hw->switch_info;
3186 u8 i;
3187
3188
3189 ice_rm_all_sw_replay_rule_info(hw);
3190
3191
3192
3193
3194 for (i = 0; i < ICE_SW_LKUP_LAST; i++)
3195 list_replace_init(&sw->recp_list[i].filt_rules,
3196 &sw->recp_list[i].filt_replay_rules);
3197
3198 return 0;
3199}
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
3210{
3211 enum ice_status status;
3212
3213 if (!ice_is_vsi_valid(hw, vsi_handle))
3214 return ICE_ERR_PARAM;
3215
3216
3217 if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
3218 status = ice_replay_pre_init(hw);
3219 if (status)
3220 return status;
3221 }
3222
3223
3224 status = ice_replay_vsi_all_fltr(hw, vsi_handle);
3225 return status;
3226}
3227
3228
3229
3230
3231
3232
3233
/**
 * ice_replay_post - clean up after a completed replay
 * @hw: pointer to the HW struct
 *
 * The replay lists are no longer needed once the replay has finished, so
 * discard all saved switch-replay rule information.
 */
void ice_replay_post(struct ice_hw *hw)
{
	ice_rm_all_sw_replay_rule_info(hw);
}
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249void
3250ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
3251 bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
3252{
3253 u64 new_data;
3254
3255 new_data = rd32(hw, loreg);
3256 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
3257
3258
3259
3260
3261
3262
3263 if (!prev_stat_loaded)
3264 *prev_stat = new_data;
3265 if (new_data >= *prev_stat)
3266 *cur_stat = new_data - *prev_stat;
3267 else
3268
3269 *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
3270 *cur_stat &= 0xFFFFFFFFFFULL;
3271}
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281void
3282ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
3283 u64 *prev_stat, u64 *cur_stat)
3284{
3285 u32 new_data;
3286
3287 new_data = rd32(hw, reg);
3288
3289
3290
3291
3292
3293
3294 if (!prev_stat_loaded)
3295 *prev_stat = new_data;
3296 if (new_data >= *prev_stat)
3297 *cur_stat = new_data - *prev_stat;
3298 else
3299
3300 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
3301}
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311enum ice_status
3312ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
3313 struct ice_aqc_get_elem *buf)
3314{
3315 u16 buf_size, num_elem_ret = 0;
3316 enum ice_status status;
3317
3318 buf_size = sizeof(*buf);
3319 memset(buf, 0, buf_size);
3320 buf->generic[0].node_teid = cpu_to_le32(node_teid);
3321 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
3322 NULL);
3323 if (status || num_elem_ret != 1)
3324 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
3325 return status;
3326}
3327