// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice.h"
#include "ice_dcb.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include <net/dcbnl.h>
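
/**
 * ice_dcbnl_devreset - perform enough of a ifdown/ifup to sync DCBNL info
 * @netdev: device associated with interface that needs reset
 */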
static void ice_dcbnl_devreset(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	while (ice_is_reset_in_progress(pf->state))
		usleep_range(1000, 2000);

	dev_close(netdev);
	netdev_state_change(netdev);
	dev_open(netdev, NULL);
	netdev_state_change(netdev);
}
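
/**
 * ice_dcbnl_getets - retrieve local ETS configuration
 * @netdev: the relevant netdev
 * @ets: struct to hold ETS configuration
 */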
static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
{
	struct ice_dcbx_cfg *dcbxcfg;
	struct ice_pf *pf;

	pf = ice_netdev_to_pf(netdev);
	dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ets->willing = dcbxcfg->etscfg.willing;
	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
	ets->cbs = dcbxcfg->etscfg.cbs;
	memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc));
	memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
	       sizeof(ets->tc_reco_bw));
	memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
	       sizeof(ets->tc_reco_tsa));
	memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table,
	       sizeof(ets->reco_prio_tc));

	return 0;
}
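
/**
 * ice_dcbnl_setets - set IEEE ETS configuration
 * @netdev: pointer to relevant netdev
 * @ets: struct to hold ETS configuration
 */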
static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;
	int bwcfg = 0, bwrec = 0;
	int err, i;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	mutex_lock(&pf->tc_mutex);

	new_cfg->etscfg.willing = ets->willing;
	new_cfg->etscfg.cbs = ets->cbs;
	ice_for_each_traffic_class(i) {
		new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
		bwcfg += ets->tc_tx_bw[i];
		new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
		if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
			/* in DSCP mode up->tc mapping cannot change */
			new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
			new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
		}
		new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
		bwrec += ets->tc_reco_bw[i];
		new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
	}

	if (ice_dcb_bwchk(pf, new_cfg)) {
		err = -EINVAL;
		goto ets_out;
	}

	new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;

	if (!bwcfg)
		new_cfg->etscfg.tcbwtable[0] = 100;

	if (!bwrec)
		new_cfg->etsrec.tcbwtable[0] = 100;

	err = ice_pf_dcb_cfg(pf, new_cfg, true);
	/* return of zero indicates new cfg applied */
	if (err == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);
	if (err == ICE_DCB_NO_HW_CHG)
		err = ICE_DCB_HW_CHG_RST;

ets_out:
	mutex_unlock(&pf->tc_mutex);
	return err;
}
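
/**
 * ice_dcbnl_getnumtcs - Get max number of traffic classes supported
 * @dev: pointer to netdev struct
 * @tcid: TC ID
 * @num: total number of TCs supported by the device
 *
 * Return the total number of TCs supported
 */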
static int
ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
{
	struct ice_pf *pf = ice_netdev_to_pf(dev);

	if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
		return -EINVAL;

	*num = pf->hw.func_caps.common_cap.maxtc;
	return 0;
}
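
/**
 * ice_dcbnl_getdcbx - retrieve current DCBX capability
 * @netdev: pointer to the netdev struct
 */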
static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	return pf->dcbx_cap;
}
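
/**
 * ice_dcbnl_setdcbx - set required DCBX capability
 * @netdev: the corresponding netdev
 * @mode: required mode
 */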
static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_qos_cfg *qos_cfg;

	/* if FW LLDP agent is running, DCBNL not allowed to change mode */
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return ICE_DCB_NO_HW_CHG;

	/* No support for LLD_MANAGED modes or CEE+IEEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return ICE_DCB_NO_HW_CHG;

	/* Already set to the given mode, no change */
	if (mode == pf->dcbx_cap)
		return ICE_DCB_NO_HW_CHG;

	qos_cfg = &pf->hw.port_info->qos_cfg;

	/* DSCP configuration is not DCBx negotiated */
	if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
		return ICE_DCB_NO_HW_CHG;

	pf->dcbx_cap = mode;

	if (mode & DCB_CAP_DCBX_VER_CEE)
		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
	else
		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;

	dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
	return ICE_DCB_HW_CHG_RST;
}
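
/**
 * ice_dcbnl_get_perm_hw_addr - MAC address used by DCBX
 * @netdev: pointer to netdev struct
 * @perm_addr: buffer to return permanent MAC address
 */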
static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;
	int i, j;

	memset(perm_addr, 0xff, MAX_ADDR_LEN);

	for (i = 0; i < netdev->addr_len; i++)
		perm_addr[i] = pi->mac.perm_addr[i];

	for (j = 0; j < netdev->addr_len; j++, i++)
		perm_addr[i] = pi->mac.perm_addr[j];
}
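
/**
 * ice_get_pfc_delay - Retrieve PFC Link Delay
 * @hw: pointer to HW struct
 * @delay: holds the PFC Link Delay value
 */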
static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
{
	u32 val;

	val = rd32(hw, PRTDCB_GENC);
	*delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
}
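
/**
 * ice_dcbnl_getpfc - retrieve local IEEE PFC config
 * @netdev: pointer to netdev struct
 * @pfc: struct to hold PFC info
 */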
static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;
	struct ice_dcbx_cfg *dcbxcfg;
	int i;

	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
	pfc->pfc_cap = dcbxcfg->pfc.pfccap;
	pfc->pfc_en = dcbxcfg->pfc.pfcena;
	pfc->mbc = dcbxcfg->pfc.mbc;
	ice_get_pfc_delay(&pf->hw, &pfc->delay);

	ice_for_each_traffic_class(i) {
		pfc->requests[i] = pf->stats.priority_xoff_tx[i];
		pfc->indications[i] = pf->stats.priority_xoff_rx[i];
	}

	return 0;
}
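
/**
 * ice_dcbnl_setpfc - set local IEEE PFC config
 * @netdev: pointer to relevant netdev
 * @pfc: pointer to struct holding PFC config
 */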
static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;
	int err;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	mutex_lock(&pf->tc_mutex);

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	if (pfc->pfc_cap)
		new_cfg->pfc.pfccap = pfc->pfc_cap;
	else
		new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;

	new_cfg->pfc.pfcena = pfc->pfc_en;

	err = ice_pf_dcb_cfg(pf, new_cfg, true);
	if (err == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);
	if (err == ICE_DCB_NO_HW_CHG)
		err = ICE_DCB_HW_CHG_RST;
	mutex_unlock(&pf->tc_mutex);
	return err;
}
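
/**
 * ice_dcbnl_get_pfc_cfg - Get CEE PFC config
 * @netdev: pointer to netdev struct
 * @prio: corresponding user priority
 * @setting: the PFC setting for given UP
 */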
static void
ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (prio >= ICE_MAX_USER_PRIORITY)
		return;

	*setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
	dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
		prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
}
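
/**
 * ice_dcbnl_set_pfc_cfg - Set CEE PFC config
 * @netdev: the corresponding netdev
 * @prio: User Priority
 * @set: PFC setting to apply
 */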
static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (prio >= ICE_MAX_USER_PRIORITY)
		return;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
	if (set)
		new_cfg->pfc.pfcena |= BIT(prio);
	else
		new_cfg->pfc.pfcena &= ~BIT(prio);

	dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
		prio, set, new_cfg->pfc.pfcena);
}
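
/**
 * ice_dcbnl_getpfcstate - get CEE PFC mode
 * @netdev: pointer to netdev struct
 */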
static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;

	/* Return enabled if any UP is enabled for PFC */
	if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
		return 1;

	return 0;
}
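
/**
 * ice_dcbnl_getstate - get DCB enabled state
 * @netdev: pointer to netdev struct
 */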
static u8 ice_dcbnl_getstate(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u8 state = 0;

	state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);

	dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
	return state;
}
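
/**
 * ice_dcbnl_setstate - Set CEE DCB state
 * @netdev: pointer to relevant netdev
 * @state: state value to set
 */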
static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return ICE_DCB_NO_HW_CHG;

	/* Nothing to do */
	if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		return ICE_DCB_NO_HW_CHG;

	if (state) {
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
		memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
		       &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
		       sizeof(struct ice_dcbx_cfg));
	} else {
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	}

	return ICE_DCB_HW_CHG;
}
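
/**
 * ice_dcbnl_get_pg_tc_cfg_tx - get CEE PG Tx config
 * @netdev: pointer to netdev struct
 * @prio: the corresponding user priority
 * @prio_type: traffic priority type
 * @pgid: the BW group ID the traffic class belongs to
 * @bw_pct: BW percentage for the corresponding BWG
 * @up_map: prio mapped to corresponding TC
 */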
static void
ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
			   u8 __always_unused *prio_type, u8 *pgid,
			   u8 __always_unused *bw_pct,
			   u8 __always_unused *up_map)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (prio >= ICE_MAX_USER_PRIORITY)
		return;

	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
	dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
		*pgid);
}
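
/**
 * ice_dcbnl_set_pg_tc_cfg_tx - set CEE PG Tx config
 * @netdev: pointer to relevant netdev
 * @tc: the corresponding traffic class
 * @prio_type: the traffic priority type
 * @bwg_id: the BW group ID the TC belongs to
 * @bw_pct: the BW percentage for the BWG
 * @up_map: prio mapped to corresponding TC
 */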
static void
ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
			   u8 __always_unused prio_type,
			   u8 __always_unused bwg_id,
			   u8 __always_unused bw_pct, u8 up_map)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;
	int i;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	/* prio_type, bwg_id and bw_pct per UP are not supported */

	ice_for_each_traffic_class(i) {
		if (up_map & BIT(i))
			new_cfg->etscfg.prio_table[i] = tc;
	}
	new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS;
}
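
/**
 * ice_dcbnl_get_pg_bwg_cfg_tx - Get CEE PGBW config
 * @netdev: pointer to the netdev struct
 * @pgid: corresponding traffic class
 * @bw_pct: the BW percentage for the corresponding TC
 */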
static void
ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
		return;

	*bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
	dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
		pgid, *bw_pct);
}
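
/**
 * ice_dcbnl_set_pg_bwg_cfg_tx - set CEE PG Tx BW config
 * @netdev: the corresponding netdev
 * @pgid: Bandwidth Group ID
 * @bw_pct: percentage of BW associated with the group
 */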
static void
ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
		return;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
}
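
/**
 * ice_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
 * @netdev: pointer to netdev struct
 * @prio: the corresponding user priority
 * @prio_type: the traffic priority type
 * @pgid: the PG ID
 * @bw_pct: the BW percentage for the corresponding BWG
 * @up_map: prio mapped to corresponding TC
 */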
static void
ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
			   u8 __always_unused *prio_type, u8 *pgid,
			   u8 __always_unused *bw_pct,
			   u8 __always_unused *up_map)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_port_info *pi = pf->hw.port_info;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	if (prio >= ICE_MAX_USER_PRIORITY)
		return;

	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
}
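
/**
 * ice_dcbnl_set_pg_tc_cfg_rx - set CEE PG Rx config
 * @netdev: relevant netdev struct
 * @prio: corresponding user priority
 * @prio_type: the traffic priority type
 * @pgid: the PG ID
 * @bw_pct: BW percentage for corresponding BWG
 * @up_map: prio mapped to corresponding TC
 *
 * lldpad requires this function pointer to be non-NULL to complete CEE config.
 */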
static void
ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
			   int __always_unused prio,
			   u8 __always_unused prio_type,
			   u8 __always_unused pgid,
			   u8 __always_unused bw_pct,
			   u8 __always_unused up_map)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
}
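
/**
 * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
 * @netdev: pointer to netdev struct
 * @pgid: the corresponding traffic class
 * @bw_pct: the BW percentage for the corresponding TC
 */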
static void
ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
			    u8 *bw_pct)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return;

	*bw_pct = 0;
}
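
/**
 * ice_dcbnl_set_pg_bwg_cfg_rx - set CEE PG BW Rx config
 * @netdev: the corresponding netdev
 * @pgid: corresponding TC
 * @bw_pct: BW percentage for given TC
 *
 * lldpad requires this function pointer to be non-NULL to complete CEE config.
 */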
static void
ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
			    u8 __always_unused bw_pct)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
}
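
/**
 * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
 * @netdev: pointer to netdev struct
 * @capid: the capability type
 * @cap: the capability value
 */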
static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
		return ICE_DCB_NO_HW_CHG;

	switch (capid) {
	case DCB_CAP_ATTR_PG:
		*cap = true;
		break;
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_UP2TC:
		*cap = false;
		break;
	case DCB_CAP_ATTR_PG_TCS:
		*cap = 0x80;
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		*cap = 0x80;
		break;
	case DCB_CAP_ATTR_GSP:
		*cap = false;
		break;
	case DCB_CAP_ATTR_BCN:
		*cap = false;
		break;
	case DCB_CAP_ATTR_DCBX:
		*cap = pf->dcbx_cap;
		break;
	default:
		*cap = false;
		break;
	}

	dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
		capid, *cap);
	return 0;
}
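
/**
 * ice_dcbnl_getapp - get CEE APP
 * @netdev: pointer to netdev struct
 * @idtype: the App selector
 * @id: the App ethtype or port number
 */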
static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct dcb_app app = {
				.selector = idtype,
				.protocol = id,
			     };

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return -EINVAL;

	return dcb_getapp(netdev, &app);
}
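
/**
 * ice_dcbnl_find_app - Search for APP in given DCB config
 * @cfg: struct to hold DCBX config
 * @app: struct to hold app data to look for
 */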
static bool
ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
		   struct ice_dcb_app_priority_table *app)
{
	unsigned int i;

	for (i = 0; i < cfg->numapps; i++) {
		if (app->selector == cfg->app[i].selector &&
		    app->prot_id == cfg->app[i].prot_id &&
		    app->priority == cfg->app[i].priority)
			return true;
	}

	return false;
}

#define ICE_BYTES_PER_DSCP_VAL	8
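
/**
 * ice_dcbnl_setapp - set local IEEE App config
 * @netdev: relevant netdev struct
 * @app: struct to hold app config info
 */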
static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcb_app_priority_table new_app;
	struct ice_dcbx_cfg *old_cfg, *new_cfg;
	u8 max_tc;
	int ret;

	/* ONLY DSCP APP TLVs have operational significance */
	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
		return -EINVAL;

	/* only allow APP TLVs in SW Mode */
	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
		netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
		return -EINVAL;
	}

	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (!ice_is_feature_supported(pf, ICE_F_DSCP))
		return -EOPNOTSUPP;

	if (app->protocol >= ICE_DSCP_NUM_VAL) {
		netdev_err(netdev, "DSCP value 0x%04X out of range\n",
			   app->protocol);
		return -EINVAL;
	}

	max_tc = pf->hw.func_caps.common_cap.maxtc;
	if (app->priority >= max_tc) {
		netdev_err(netdev, "TC %d out of range, max TC %d\n",
			   app->priority, max_tc);
		return -EINVAL;
	}

	/* grab TC mutex */
	mutex_lock(&pf->tc_mutex);

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ret = dcb_ieee_setapp(netdev, app);
	if (ret)
		goto setapp_out;

	if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
		netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
			   app->protocol);
		ret = dcb_ieee_delapp(netdev, app);
		if (ret)
			netdev_err(netdev, "Failed to delete re-mapping TLV\n");
		ret = -EINVAL;
		goto setapp_out;
	}

	new_app.selector = app->selector;
	new_app.prot_id = app->protocol;
	new_app.priority = app->priority;

	/* If port is not in DSCP mode, need to set */
	if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
		int i, j;

		/* Set port to DSCP mode */
		ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
					  NULL);
		if (ret) {
			netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
				   ret);
			goto setapp_out;
		}
		netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");

		new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;

		/* set default DSCP QoS values */
		new_cfg->etscfg.willing = 0;
		new_cfg->pfc.pfccap = max_tc;
		new_cfg->pfc.willing = 0;

		for (i = 0; i < max_tc; i++)
			for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
				int dscp, offset;

				dscp = (i * max_tc) + j;
				offset = max_tc * ICE_BYTES_PER_DSCP_VAL;

				new_cfg->dscp_map[dscp] = i;
				/* if less than 8 TCs supported */
				if (max_tc < ICE_MAX_TRAFFIC_CLASS)
					new_cfg->dscp_map[dscp + offset] = i;
			}

		new_cfg->etscfg.tcbwtable[0] = 100;
		new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
		new_cfg->etscfg.prio_table[0] = 0;

		for (i = 1; i < max_tc; i++) {
			new_cfg->etscfg.tcbwtable[i] = 0;
			new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
			new_cfg->etscfg.prio_table[i] = i;
		}
	}

	/* Put new data in the mapping table */
	new_cfg->dscp_map[app->protocol] = app->priority;
	new_cfg->app[new_cfg->numapps++] = new_app;

	ret = ice_pf_dcb_cfg(pf, new_cfg, true);
	/* return of zero indicates new cfg applied */
	if (ret == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);
	else
		ret = ICE_DCB_NO_HW_CHG;

setapp_out:
	mutex_unlock(&pf->tc_mutex);
	return ret;
}
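
/**
 * ice_dcbnl_delapp - Delete local IEEE App config
 * @netdev: relevant netdev
 * @app: struct to hold app to delete
 *
 * Will not delete first application required by the FW
 */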
static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *old_cfg, *new_cfg;
	unsigned int i, j;
	int ret = 0;

	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
		netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
		return -EINVAL;
	}

	mutex_lock(&pf->tc_mutex);
	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ret = dcb_ieee_delapp(netdev, app);
	if (ret)
		goto delapp_out;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	for (i = 0; i < new_cfg->numapps; i++) {
		if (app->selector == new_cfg->app[i].selector &&
		    app->protocol == new_cfg->app[i].prot_id &&
		    app->priority == new_cfg->app[i].priority) {
			new_cfg->app[i].selector = 0;
			new_cfg->app[i].prot_id = 0;
			new_cfg->app[i].priority = 0;
			break;
		}
	}

	/* Did not find DCB App */
	if (i == new_cfg->numapps) {
		ret = -EINVAL;
		goto delapp_out;
	}

	new_cfg->numapps--;

	for (j = i; j < new_cfg->numapps; j++) {
		new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
		new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
		new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
	}

	/* if not a DSCP APP TLV or DSCP is not supported, then just exit */
	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    !ice_is_feature_supported(pf, ICE_F_DSCP)) {
		ret = ICE_DCB_HW_CHG;
		goto delapp_out;
	}

	/* if DSCP TLV, then need to address change in mapping */
	clear_bit(app->protocol, new_cfg->dscp_mapped);
	/* remap this DSCP value to default value */
	new_cfg->dscp_map[app->protocol] = app->protocol %
					   ICE_BYTES_PER_DSCP_VAL;

	/* if the last DSCP mapping just got deleted, need to switch
	 * to L2 VLAN QoS mode
	 */
	if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
	    new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
		ret = ice_aq_set_pfc_mode(&pf->hw,
					  ICE_AQC_PFC_VLAN_BASED_PFC,
					  NULL);
		if (ret) {
			netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
				    ret);
			goto delapp_out;
		}
		netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");

		new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;

		ret = ice_dcb_sw_dflt_cfg(pf, true, true);
	} else {
		ret = ice_pf_dcb_cfg(pf, new_cfg, true);
	}

	/* return of ICE_DCB_HW_CHG_RST means that software DCB was changed
	 * and hardware caused reset
	 */
	if (ret == ICE_DCB_HW_CHG_RST)
		ice_dcbnl_devreset(netdev);

	/* if the change was not significant enough to actually call
	 * the reconfiguration flow, we still need to tell caller that
	 * their request was successfully handled
	 */
	if (ret == ICE_DCB_NO_HW_CHG)
		ret = ICE_DCB_HW_CHG;

delapp_out:
	mutex_unlock(&pf->tc_mutex);
	return ret;
}
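
/**
 * ice_dcbnl_cee_set_all - Commit CEE DCB settings to HW
 * @netdev: the corresponding netdev
 */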
static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *new_cfg;
	int err;

	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return ICE_DCB_NO_HW_CHG;

	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;

	mutex_lock(&pf->tc_mutex);

	err = ice_pf_dcb_cfg(pf, new_cfg, true);

	mutex_unlock(&pf->tc_mutex);
	return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
	/* IEEE 802.1Qaz std */
	.ieee_getets = ice_dcbnl_getets,
	.ieee_setets = ice_dcbnl_setets,
	.ieee_getpfc = ice_dcbnl_getpfc,
	.ieee_setpfc = ice_dcbnl_setpfc,
	.ieee_setapp = ice_dcbnl_setapp,
	.ieee_delapp = ice_dcbnl_delapp,

	/* CEE std */
	.getstate = ice_dcbnl_getstate,
	.setstate = ice_dcbnl_setstate,
	.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
	.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
	.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
	.setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
	.setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
	.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
	.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
	.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
	.getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx,
	.setpfccfg = ice_dcbnl_set_pfc_cfg,
	.getpfccfg = ice_dcbnl_get_pfc_cfg,
	.setall = ice_dcbnl_cee_set_all,
	.getcap = ice_dcbnl_get_cap,
	.getnumtcs = ice_dcbnl_getnumtcs,
	.getpfcstate = ice_dcbnl_getpfcstate,
	.getapp = ice_dcbnl_getapp,

	/* DCBX configuration */
	.getdcbx = ice_dcbnl_getdcbx,
	.setdcbx = ice_dcbnl_setdcbx,
};
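
/**
 * ice_dcbnl_set_all - set all the apps and ieee data from DCBX config
 * @vsi: pointer to VSI struct
 */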
void ice_dcbnl_set_all(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_dcbx_cfg *dcbxcfg;
	struct ice_port_info *pi;
	struct dcb_app sapp;
	struct ice_pf *pf;
	unsigned int i;

	if (!netdev)
		return;

	pf = ice_netdev_to_pf(netdev);
	pi = pf->hw.port_info;

	/* SW DCB taken care of by SW Default Config */
	if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
		return;

	/* DCB not enabled */
	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		return;

	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;

	for (i = 0; i < dcbxcfg->numapps; i++) {
		u8 prio, tc_map;

		prio = dcbxcfg->app[i].priority;
		tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]);

		/* Add APP only if the TC is enabled for this VSI */
		if (tc_map & vsi->tc_cfg.ena_tc) {
			sapp.selector = dcbxcfg->app[i].selector;
			sapp.protocol = dcbxcfg->app[i].prot_id;
			sapp.priority = prio;
			dcb_ieee_setapp(netdev, &sapp);
		}
	}

	/* Notify user-space of the changes */
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
}
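
/**
 * ice_dcbnl_vsi_del_app - Delete APP for given VSI
 * @vsi: pointer to the main VSI
 * @app: APP to delete
 *
 * Delete given APP from the DCBNL APP table for given PF
 */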
static void
ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
		      struct ice_dcb_app_priority_table *app)
{
	struct dcb_app sapp;
	int err;

	sapp.selector = app->selector;
	sapp.protocol = app->prot_id;
	sapp.priority = app->priority;
	err = ice_dcbnl_delapp(vsi->netdev, &sapp);
	dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
		vsi->idx, err, app->selector, app->prot_id, app->priority);
}
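
/**
 * ice_dcbnl_flush_apps - Delete all removed APPs
 * @pf: the corresponding PF
 * @old_cfg: old DCBX configuration data
 * @new_cfg: new DCBX configuration data
 *
 * Find and delete all APPs that are not present in the passed
 * DCB configuration
 */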
void
ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
		     struct ice_dcbx_cfg *new_cfg)
{
	struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
	unsigned int i;

	if (!main_vsi)
		return;

	for (i = 0; i < old_cfg->numapps; i++) {
		struct ice_dcb_app_priority_table app = old_cfg->app[i];

		/* The APP is no longer available, delete it */
		if (!ice_dcbnl_find_app(new_cfg, &app))
			ice_dcbnl_vsi_del_app(main_vsi, &app);
	}
}
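
/**
 * ice_dcbnl_setup - setup DCBNL
 * @vsi: VSI to get associated netdev from
 */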
void ice_dcbnl_setup(struct ice_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_pf *pf;

	pf = ice_netdev_to_pf(netdev);
	if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
		return;

	netdev->dcbnl_ops = &dcbnl_ops;
	ice_dcbnl_set_all(vsi);
}