#include <linux/dcbnl.h>
#include <linux/math64.h>

#include "mlx4_en.h"

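/* Report the IEEE 802.1Qaz ETS configuration cached in the driver private
 * data (populated by ieee_setets); the hardware is not queried here, only
 * the cached copy plus the TC capability is returned.
 */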
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
				     struct ieee_ets *ets)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ieee_ets *my_ets = &priv->ets;

	if (!my_ets)
		return -EINVAL;

	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));

	return 0;
}

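/* Sanity-check a user-supplied ETS configuration: every priority must map
 * to a valid traffic class, only the strict and ETS transmission selection
 * algorithms are supported, and the ETS bandwidth shares must add up to
 * exactly 100%.
 */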
static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
{
	int i;
	int total_ets_bw = 0;
	int has_ets_tc = 0;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
			en_err(priv, "Bad priority in UP <=> TC mapping. UP: %d, TC: %d\n",
			       i, ets->prio_tc[i]);
			return -EINVAL;
		}

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			has_ets_tc = 1;
			total_ets_bw += ets->tc_tx_bw[i];
			break;
		default:
			en_err(priv, "TC[%d]: Not supported TSA: %d\n",
			       i, ets->tc_tsa[i]);
			return -EOPNOTSUPP;
		}
	}

	if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
		en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
		       total_ets_bw);
		return -EINVAL;
	}

	return 0;
}

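/* Program the per-TC scheduler in the device: strict-priority TCs are
 * assigned priority-group indices, ETS TCs get their configured bandwidth
 * share (never zero, a minimum is enforced), and the per-TC rate limits
 * are passed through.  NULL arguments mean "reuse the cached values".
 */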
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
					 struct ieee_ets *ets, u16 *ratelimit)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int num_strict = 0;
	int i;
	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };

	ets = ets ?: &priv->ets;
	ratelimit = ratelimit ?: priv->maxrate;

	/* higher TC means higher priority => lower pg */
	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			pg[i] = num_strict++;
			tc_tx_bw[i] = MLX4_EN_BW_MAX;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			pg[i] = MLX4_EN_TC_ETS;
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
			break;
		}
	}

	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
				       ratelimit);
}

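/* Apply a new ETS configuration: validate it, push the UP->TC mapping and
 * the scheduler settings to the device, and only then cache it in
 * priv->ets, so a failed update leaves the previous configuration intact.
 */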
static int mlx4_en_dcbnl_ieee_setets(struct net_device *dev,
				     struct ieee_ets *ets)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_ets_validate(priv, ets);
	if (err)
		return err;

	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
	if (err)
		return err;

	err = mlx4_en_config_port_scheduler(priv, ets, NULL);
	if (err)
		return err;

	memcpy(&priv->ets, ets, sizeof(priv->ets));

	return 0;
}

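/* Report PFC capability and the per-priority PFC enable bitmap currently
 * stored in the port profile.
 */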
static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
				     struct ieee_pfc *pfc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
	pfc->pfc_en = priv->prof->tx_ppp;

	return 0;
}

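/* Enable or disable per-priority flow control.  Global pause and PFC are
 * treated as mutually exclusive here: any enabled PFC priority turns
 * global pause off, and clearing the PFC bitmap re-enables it.  The
 * resulting profile is written to the device via mlx4_SET_PORT_general().
 */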
static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
				     struct ieee_pfc *pfc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_port_profile *prof = priv->prof;
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
	       pfc->pfc_cap,
	       pfc->pfc_en,
	       pfc->mbc,
	       pfc->delay);

	prof->rx_pause = !pfc->pfc_en;
	prof->tx_pause = !pfc->pfc_en;
	prof->rx_ppp = pfc->pfc_en;
	prof->tx_ppp = pfc->pfc_en;

	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause,
				    prof->tx_ppp,
				    prof->rx_pause,
				    prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");

	return err;
}

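/* DCBX is handled by the host stack and only the IEEE (802.1Qaz) flavour
 * is supported: setdcbx rejects LLD-managed or CEE modes and any request
 * that drops the HOST/IEEE bits.
 */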
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}

static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	return 0;
}

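/* Hardware rate limits are kept in units of MLX4_RATELIMIT_UNITS_IN_KB
 * (100000 Kbps, i.e. 100 Mbps).  getmaxrate converts the cached per-TC
 * limits back to Kbps for the dcbnl interface.
 */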
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
					 struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		maxrate->tc_maxrate[i] =
			priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;

	return 0;
}

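/* Convert the requested per-TC maximum rates from Kbps to hardware units
 * (rounding up, so a non-zero request never truncates to "unlimited"),
 * program the scheduler, and cache the new values only on success.
 */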
static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
					 struct ieee_maxrate *maxrate)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 tmp[IEEE_8021QAZ_MAX_TCS];
	int i, err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		/* Convert from Kbps into HW units, rounding result up.
		 * Setting to 0, means unlimited BW.
		 */
		tmp[i] = div_u64(maxrate->tc_maxrate[i] +
				 MLX4_RATELIMIT_UNITS_IN_KB - 1,
				 MLX4_RATELIMIT_UNITS_IN_KB);
	}

	err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
	if (err)
		return err;

	memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));

	return 0;
}

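/* Two dcbnl operation tables are exposed: the full set (ETS, maxrate and
 * PFC), and a reduced PFC-only set, presumably registered when the device
 * lacks ETS scheduling support.
 */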
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
	.ieee_getets = mlx4_en_dcbnl_ieee_getets,
	.ieee_setets = mlx4_en_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
	.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx = mlx4_en_dcbnl_getdcbx,
	.setdcbx = mlx4_en_dcbnl_setdcbx,
};

const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
	.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
	.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,

	.getdcbx = mlx4_en_dcbnl_getdcbx,
	.setdcbx = mlx4_en_dcbnl_setdcbx,
};