// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.
4#include "hclge_main.h"
5#include "hclge_tm.h"
6#include "hnae3.h"
7
8#define BW_PERCENT 100
9
10static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
11 struct ieee_ets *ets)
12{
13 u8 i;
14
15 for (i = 0; i < HNAE3_MAX_TC; i++) {
16 switch (ets->tc_tsa[i]) {
17 case IEEE_8021QAZ_TSA_STRICT:
18 hdev->tm_info.tc_info[i].tc_sch_mode =
19 HCLGE_SCH_MODE_SP;
20 hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
21 break;
22 case IEEE_8021QAZ_TSA_ETS:
23 hdev->tm_info.tc_info[i].tc_sch_mode =
24 HCLGE_SCH_MODE_DWRR;
25 hdev->tm_info.pg_info[0].tc_dwrr[i] =
26 ets->tc_tx_bw[i];
27 break;
28 default:
29
30
31
32
33
34 return -EINVAL;
35 }
36 }
37
38 hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
39
40 return 0;
41}
42
43static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
44 struct ieee_ets *ets)
45{
46 u32 i;
47
48 memset(ets, 0, sizeof(*ets));
49 ets->willing = 1;
50 ets->ets_cap = hdev->tc_max;
51
52 for (i = 0; i < HNAE3_MAX_TC; i++) {
53 ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
54 ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
55
56 if (hdev->tm_info.tc_info[i].tc_sch_mode ==
57 HCLGE_SCH_MODE_SP)
58 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
59 else
60 ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
61 }
62}
63
64
65static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
66{
67 struct hclge_vport *vport = hclge_get_vport(h);
68 struct hclge_dev *hdev = vport->back;
69
70 hclge_tm_info_to_ieee_ets(hdev, ets);
71
72 return 0;
73}
74
75static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
76 u8 *prio_tc)
77{
78 int i;
79
80 if (num_tc > hdev->tc_max) {
81 dev_err(&hdev->pdev->dev,
82 "tc num checking failed, %u > tc_max(%u)\n",
83 num_tc, hdev->tc_max);
84 return -EINVAL;
85 }
86
87 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
88 if (prio_tc[i] >= num_tc) {
89 dev_err(&hdev->pdev->dev,
90 "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
91 i, prio_tc[i], num_tc);
92 return -EINVAL;
93 }
94 }
95
96 if (num_tc > hdev->vport[0].alloc_tqps) {
97 dev_err(&hdev->pdev->dev,
98 "allocated tqp checking failed, %u > tqp(%u)\n",
99 num_tc, hdev->vport[0].alloc_tqps);
100 return -EINVAL;
101 }
102
103 return 0;
104}
105
106static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
107 u8 *tc, bool *changed)
108{
109 bool has_ets_tc = false;
110 u32 total_ets_bw = 0;
111 u8 max_tc = 0;
112 int ret;
113 u8 i;
114
115 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
116 if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
117 *changed = true;
118
119 if (ets->prio_tc[i] > max_tc)
120 max_tc = ets->prio_tc[i];
121 }
122
123 ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
124 if (ret)
125 return ret;
126
127 for (i = 0; i < HNAE3_MAX_TC; i++) {
128 switch (ets->tc_tsa[i]) {
129 case IEEE_8021QAZ_TSA_STRICT:
130 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
131 HCLGE_SCH_MODE_SP)
132 *changed = true;
133 break;
134 case IEEE_8021QAZ_TSA_ETS:
135 if (hdev->tm_info.tc_info[i].tc_sch_mode !=
136 HCLGE_SCH_MODE_DWRR)
137 *changed = true;
138
139 total_ets_bw += ets->tc_tx_bw[i];
140 has_ets_tc = true;
141 break;
142 default:
143 return -EINVAL;
144 }
145 }
146
147 if (has_ets_tc && total_ets_bw != BW_PERCENT)
148 return -EINVAL;
149
150 *tc = max_tc + 1;
151 if (*tc != hdev->tm_info.num_tc)
152 *changed = true;
153
154 return 0;
155}
156
157static int hclge_map_update(struct hclge_dev *hdev)
158{
159 int ret;
160
161 ret = hclge_tm_schd_setup_hw(hdev);
162 if (ret)
163 return ret;
164
165 ret = hclge_pause_setup_hw(hdev, false);
166 if (ret)
167 return ret;
168
169 ret = hclge_buffer_alloc(hdev);
170 if (ret)
171 return ret;
172
173 hclge_rss_indir_init_cfg(hdev);
174
175 return hclge_rss_init_hw(hdev);
176}
177
178static int hclge_client_setup_tc(struct hclge_dev *hdev)
179{
180 struct hclge_vport *vport = hdev->vport;
181 struct hnae3_client *client;
182 struct hnae3_handle *handle;
183 int ret;
184 u32 i;
185
186 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
187 handle = &vport[i].nic;
188 client = handle->client;
189
190 if (!client || !client->ops || !client->ops->setup_tc)
191 continue;
192
193 ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
194 if (ret)
195 return ret;
196 }
197
198 return 0;
199}
200
/* dcbnl ieee_setets callback: apply a new ETS configuration.
 *
 * If the prio->tc map or TC count changes, the client must be brought
 * DOWN and UNINITed before reconfiguring, then re-INITed and brought UP
 * afterwards — the ordering of these notifications is load-bearing.
 * Returns 0 on success or a negative errno.
 */
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	/* ETS config is only accepted in IEEE DCBX mode and is mutually
	 * exclusive with mqprio-based TC setup.
	 */
	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		/* Quiesce and tear down the client before touching the
		 * queue/TC layout.
		 */
		ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
		if (ret)
			return ret;

		ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		ret = hclge_client_setup_tc(hdev);
		if (ret)
			goto err_out;

		ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
		if (ret)
			return ret;

		ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
		if (ret)
			return ret;
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	/* Rollback: if the client was never torn down there is nothing to
	 * restore; otherwise try to re-INIT and bring it back UP so the
	 * netdev is not left dead.  The original error is always returned.
	 */
	if (!map_changed)
		return ret;

	if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
		return ret;

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	return ret;
}
263
264static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
265{
266 u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
267 struct hclge_vport *vport = hclge_get_vport(h);
268 struct hclge_dev *hdev = vport->back;
269 u8 i, j, pfc_map, *prio_tc;
270 int ret;
271
272 memset(pfc, 0, sizeof(*pfc));
273 pfc->pfc_cap = hdev->pfc_max;
274 prio_tc = hdev->tm_info.prio_tc;
275 pfc_map = hdev->tm_info.hw_pfc_map;
276
277
278 for (i = 0; i < hdev->tm_info.num_tc; i++) {
279 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
280 if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
281 pfc->pfc_en |= BIT(j);
282 }
283 }
284
285 ret = hclge_pfc_tx_stats_get(hdev, requests);
286 if (ret)
287 return ret;
288
289 ret = hclge_pfc_rx_stats_get(hdev, indications);
290 if (ret)
291 return ret;
292
293 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
294 pfc->requests[i] = requests[i];
295 pfc->indications[i] = indications[i];
296 }
297 return 0;
298}
299
300static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
301{
302 struct hclge_vport *vport = hclge_get_vport(h);
303 struct hclge_dev *hdev = vport->back;
304 u8 i, j, pfc_map, *prio_tc;
305
306 if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
307 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
308 return -EINVAL;
309
310 if (pfc->pfc_en == hdev->tm_info.pfc_en)
311 return 0;
312
313 prio_tc = hdev->tm_info.prio_tc;
314 pfc_map = 0;
315
316 for (i = 0; i < hdev->tm_info.num_tc; i++) {
317 for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
318 if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
319 pfc_map |= BIT(i);
320 break;
321 }
322 }
323 }
324
325 hdev->tm_info.hw_pfc_map = pfc_map;
326 hdev->tm_info.pfc_en = pfc->pfc_en;
327
328 hclge_tm_pfc_info_update(hdev);
329
330 return hclge_pause_setup_hw(hdev, false);
331}
332
333
334static u8 hclge_getdcbx(struct hnae3_handle *h)
335{
336 struct hclge_vport *vport = hclge_get_vport(h);
337 struct hclge_dev *hdev = vport->back;
338
339 if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
340 return 0;
341
342 return hdev->dcbx_cap;
343}
344
345static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
346{
347 struct hclge_vport *vport = hclge_get_vport(h);
348 struct hclge_dev *hdev = vport->back;
349
350
351 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
352 (mode & DCB_CAP_DCBX_VER_CEE) ||
353 !(mode & DCB_CAP_DCBX_HOST))
354 return 1;
355
356 hdev->dcbx_cap = mode;
357
358 return 0;
359}
360
361
/* mqprio setup_tc callback: apply a TC count and prio->tc map coming from
 * the tc/mqprio path (rejected while DCB owns the configuration).  The
 * client DOWN/UNINIT -> reconfigure -> INIT/UP notification ordering is
 * load-bearing.  Returns 0 on success or a negative errno.
 */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* mqprio and DCB-driven TC config are mutually exclusive */
	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
	if (ret)
		return -EINVAL;

	/* Quiesce and tear down the client before changing the TC layout */
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	hclge_tm_schd_info_update(hdev, tc);
	hclge_tm_prio_tc_info_update(hdev, prio_tc);

	ret = hclge_tm_init_hw(hdev, false);
	if (ret)
		goto err_out;

	ret = hclge_client_setup_tc(hdev);
	if (ret)
		goto err_out;

	/* TC config now belongs to mqprio (when tc > 1), not DCB */
	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);

err_out:
	/* Best-effort rollback: try to re-INIT and bring the client back UP;
	 * the original error is always returned.
	 */
	if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
		return ret;

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	return ret;
}
414
/* DCB operations exported to the hns3 client via kinfo->dcb_ops */
static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets = hclge_ieee_getets,
	.ieee_setets = hclge_ieee_setets,
	.ieee_getpfc = hclge_ieee_getpfc,
	.ieee_setpfc = hclge_ieee_setpfc,
	.getdcbx = hclge_getdcbx,
	.setdcbx = hclge_setdcbx,
	.setup_tc = hclge_setup_tc,
};
424
425void hclge_dcb_ops_set(struct hclge_dev *hdev)
426{
427 struct hclge_vport *vport = hdev->vport;
428 struct hnae3_knic_private_info *kinfo;
429
430
431
432
433 if (!hnae3_dev_dcb_supported(hdev) ||
434 vport->vport_id != 0)
435 return;
436
437 kinfo = &vport->nic.kinfo;
438 kinfo->dcb_ops = &hns3_dcb_ops;
439 hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
440}
441