1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/netdevice.h>
21#include <linux/netlink.h>
22#include <linux/slab.h>
23#include <net/netlink.h>
24#include <net/rtnetlink.h>
25#include <linux/dcbnl.h>
26#include <net/dcbevent.h>
27#include <linux/rtnetlink.h>
28#include <linux/init.h>
29#include <net/sock.h>
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/* Netlink attribute policy for the top-level DCB attributes (DCB_ATTR_*). */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};
70
71
/* Policy for attributes nested in DCB_ATTR_PFC_CFG (per-priority PFC). */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
83
84
/* Policy for attributes nested in DCB_ATTR_PG_CFG (priority groups):
 * per-TC nested parameter sets plus per-group bandwidth percentages. */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
105
106
/* Policy for the per-traffic-class parameters nested in DCB_PG_ATTR_TC_*. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};
114
115
/* Policy for attributes nested in DCB_ATTR_CAP (device capability query). */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};
127
128
/* Policy for attributes nested in DCB_ATTR_NUMTCS (traffic-class counts). */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
134
135
/* Policy for attributes nested in DCB_ATTR_BCN (backward congestion
 * notification): u8 per-priority RP enables plus u32 config parameters. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
163
164
/* Policy for attributes nested in DCB_ATTR_APP (application priority). */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
170
171
/* Policy for attributes nested in DCB_ATTR_IEEE; the fixed-size entries
 * carry whole binary structs, validated by minimum length only. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
};
181
182
/* Policy for attributes nested in DCB_ATTR_FEATCFG (feature configuration). */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
189
/* Global list of per-device application priority entries (struct
 * dcb_app_type); all traversals below take dcb_lock (spin_lock_bh). */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
192
193static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
194 u32 flags, struct nlmsghdr **nlhp)
195{
196 struct sk_buff *skb;
197 struct dcbmsg *dcb;
198 struct nlmsghdr *nlh;
199
200 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
201 if (!skb)
202 return NULL;
203
204 nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
205 BUG_ON(!nlh);
206
207 dcb = nlmsg_data(nlh);
208 dcb->dcb_family = AF_UNSPEC;
209 dcb->cmd = cmd;
210 dcb->dcb_pad = 0;
211
212 if (nlhp)
213 *nlhp = nlh;
214
215 return skb;
216}
217
218static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
219 u32 seq, struct nlattr **tb, struct sk_buff *skb)
220{
221
222 if (!netdev->dcbnl_ops->getstate)
223 return -EOPNOTSUPP;
224
225 return nla_put_u8(skb, DCB_ATTR_STATE,
226 netdev->dcbnl_ops->getstate(netdev));
227}
228
229static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
230 u32 seq, struct nlattr **tb, struct sk_buff *skb)
231{
232 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
233 u8 value;
234 int ret;
235 int i;
236 int getall = 0;
237
238 if (!tb[DCB_ATTR_PFC_CFG])
239 return -EINVAL;
240
241 if (!netdev->dcbnl_ops->getpfccfg)
242 return -EOPNOTSUPP;
243
244 ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
245 tb[DCB_ATTR_PFC_CFG],
246 dcbnl_pfc_up_nest, NULL);
247 if (ret)
248 return ret;
249
250 nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
251 if (!nest)
252 return -EMSGSIZE;
253
254 if (data[DCB_PFC_UP_ATTR_ALL])
255 getall = 1;
256
257 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
258 if (!getall && !data[i])
259 continue;
260
261 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
262 &value);
263 ret = nla_put_u8(skb, i, value);
264 if (ret) {
265 nla_nest_cancel(skb, nest);
266 return ret;
267 }
268 }
269 nla_nest_end(skb, nest);
270
271 return 0;
272}
273
274static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
275 u32 seq, struct nlattr **tb, struct sk_buff *skb)
276{
277 u8 perm_addr[MAX_ADDR_LEN];
278
279 if (!netdev->dcbnl_ops->getpermhwaddr)
280 return -EOPNOTSUPP;
281
282 memset(perm_addr, 0, sizeof(perm_addr));
283 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
284
285 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
286}
287
288static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
289 u32 seq, struct nlattr **tb, struct sk_buff *skb)
290{
291 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
292 u8 value;
293 int ret;
294 int i;
295 int getall = 0;
296
297 if (!tb[DCB_ATTR_CAP])
298 return -EINVAL;
299
300 if (!netdev->dcbnl_ops->getcap)
301 return -EOPNOTSUPP;
302
303 ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
304 tb[DCB_ATTR_CAP], dcbnl_cap_nest,
305 NULL);
306 if (ret)
307 return ret;
308
309 nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
310 if (!nest)
311 return -EMSGSIZE;
312
313 if (data[DCB_CAP_ATTR_ALL])
314 getall = 1;
315
316 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
317 if (!getall && !data[i])
318 continue;
319
320 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
321 ret = nla_put_u8(skb, i, value);
322 if (ret) {
323 nla_nest_cancel(skb, nest);
324 return ret;
325 }
326 }
327 }
328 nla_nest_end(skb, nest);
329
330 return 0;
331}
332
333static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
334 u32 seq, struct nlattr **tb, struct sk_buff *skb)
335{
336 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
337 u8 value;
338 int ret;
339 int i;
340 int getall = 0;
341
342 if (!tb[DCB_ATTR_NUMTCS])
343 return -EINVAL;
344
345 if (!netdev->dcbnl_ops->getnumtcs)
346 return -EOPNOTSUPP;
347
348 ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
349 tb[DCB_ATTR_NUMTCS],
350 dcbnl_numtcs_nest, NULL);
351 if (ret)
352 return ret;
353
354 nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
355 if (!nest)
356 return -EMSGSIZE;
357
358 if (data[DCB_NUMTCS_ATTR_ALL])
359 getall = 1;
360
361 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
362 if (!getall && !data[i])
363 continue;
364
365 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
366 if (!ret) {
367 ret = nla_put_u8(skb, i, value);
368 if (ret) {
369 nla_nest_cancel(skb, nest);
370 return ret;
371 }
372 } else
373 return -EINVAL;
374 }
375 nla_nest_end(skb, nest);
376
377 return 0;
378}
379
380static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
381 u32 seq, struct nlattr **tb, struct sk_buff *skb)
382{
383 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
384 int ret;
385 u8 value;
386 int i;
387
388 if (!tb[DCB_ATTR_NUMTCS])
389 return -EINVAL;
390
391 if (!netdev->dcbnl_ops->setnumtcs)
392 return -EOPNOTSUPP;
393
394 ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
395 tb[DCB_ATTR_NUMTCS],
396 dcbnl_numtcs_nest, NULL);
397 if (ret)
398 return ret;
399
400 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
401 if (data[i] == NULL)
402 continue;
403
404 value = nla_get_u8(data[i]);
405
406 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
407 if (ret)
408 break;
409 }
410
411 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
412}
413
414static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
415 u32 seq, struct nlattr **tb, struct sk_buff *skb)
416{
417 if (!netdev->dcbnl_ops->getpfcstate)
418 return -EOPNOTSUPP;
419
420 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
421 netdev->dcbnl_ops->getpfcstate(netdev));
422}
423
424static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
425 u32 seq, struct nlattr **tb, struct sk_buff *skb)
426{
427 u8 value;
428
429 if (!tb[DCB_ATTR_PFC_STATE])
430 return -EINVAL;
431
432 if (!netdev->dcbnl_ops->setpfcstate)
433 return -EOPNOTSUPP;
434
435 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
436
437 netdev->dcbnl_ops->setpfcstate(netdev, value);
438
439 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
440}
441
442static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
443 u32 seq, struct nlattr **tb, struct sk_buff *skb)
444{
445 struct nlattr *app_nest;
446 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
447 u16 id;
448 u8 up, idtype;
449 int ret;
450
451 if (!tb[DCB_ATTR_APP])
452 return -EINVAL;
453
454 ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
455 tb[DCB_ATTR_APP], dcbnl_app_nest,
456 NULL);
457 if (ret)
458 return ret;
459
460
461 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
462 (!app_tb[DCB_APP_ATTR_ID]))
463 return -EINVAL;
464
465
466 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
467 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
468 (idtype != DCB_APP_IDTYPE_PORTNUM))
469 return -EINVAL;
470
471 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
472
473 if (netdev->dcbnl_ops->getapp) {
474 ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
475 if (ret < 0)
476 return ret;
477 else
478 up = ret;
479 } else {
480 struct dcb_app app = {
481 .selector = idtype,
482 .protocol = id,
483 };
484 up = dcb_getapp(netdev, &app);
485 }
486
487 app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
488 if (!app_nest)
489 return -EMSGSIZE;
490
491 ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
492 if (ret)
493 goto out_cancel;
494
495 ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
496 if (ret)
497 goto out_cancel;
498
499 ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
500 if (ret)
501 goto out_cancel;
502
503 nla_nest_end(skb, app_nest);
504
505 return 0;
506
507out_cancel:
508 nla_nest_cancel(skb, app_nest);
509 return ret;
510}
511
/* DCB_CMD_SAPP: set the priority for an (idtype, id) pair, via the
 * driver's setapp hook when present, else the shared app table.  Sends a
 * CEE notification after a successful (or table-path) update.
 */
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	/* The driver/table status (ret) is echoed back as the attr value. */
	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
561
/* Build the DCB_ATTR_PG_CFG reply for a CEE priority-group query.
 * @dir: 0 = TX parameters, non-zero = RX parameters.
 *
 * The request nest selects which per-TC parameter sets and per-group
 * bandwidth percentages to report; DCB_PG_ATTR_TC_ALL and
 * DCB_PG_ATTR_BW_ID_ALL request everything.  Returns 0 or a negative
 * errno; on mid-build failure the whole pg nest is cancelled.
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* With TC_ALL the parameter filter comes from the TC_ALL
		 * nest; otherwise from this TC's own nested attribute. */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested_deprecated(param_tb,
						  DCB_TC_ATTR_PARAM_MAX, data,
						  dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start_noflag(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		/* Emit only the parameters the request asked for. */
		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}
697
/* DCB_CMD_PGTX_GCFG: query TX priority-group configuration (dir = 0). */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
703
/* DCB_CMD_PGRX_GCFG: query RX priority-group configuration (dir = 1). */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
709
710static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
711 u32 seq, struct nlattr **tb, struct sk_buff *skb)
712{
713 u8 value;
714
715 if (!tb[DCB_ATTR_STATE])
716 return -EINVAL;
717
718 if (!netdev->dcbnl_ops->setstate)
719 return -EOPNOTSUPP;
720
721 value = nla_get_u8(tb[DCB_ATTR_STATE]);
722
723 return nla_put_u8(skb, DCB_ATTR_STATE,
724 netdev->dcbnl_ops->setstate(netdev, value));
725}
726
727static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
728 u32 seq, struct nlattr **tb, struct sk_buff *skb)
729{
730 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
731 int i;
732 int ret;
733 u8 value;
734
735 if (!tb[DCB_ATTR_PFC_CFG])
736 return -EINVAL;
737
738 if (!netdev->dcbnl_ops->setpfccfg)
739 return -EOPNOTSUPP;
740
741 ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
742 tb[DCB_ATTR_PFC_CFG],
743 dcbnl_pfc_up_nest, NULL);
744 if (ret)
745 return ret;
746
747 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
748 if (data[i] == NULL)
749 continue;
750 value = nla_get_u8(data[i]);
751 netdev->dcbnl_ops->setpfccfg(netdev,
752 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
753 }
754
755 return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
756}
757
758static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
759 u32 seq, struct nlattr **tb, struct sk_buff *skb)
760{
761 int ret;
762
763 if (!tb[DCB_ATTR_SET_ALL])
764 return -EINVAL;
765
766 if (!netdev->dcbnl_ops->setall)
767 return -EOPNOTSUPP;
768
769 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
770 netdev->dcbnl_ops->setall(netdev));
771 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
772
773 return ret;
774}
775
/* Apply a CEE priority-group configuration from DCB_ATTR_PG_CFG.
 * @dir: 0 = TX parameters, non-zero = RX parameters.
 *
 * Per-TC parameters absent from the request are passed to the driver as
 * DCB_ATTR_VALUE_UNDEFINED; bandwidth-group attributes not present are
 * skipped.  Always acks with 0 once the driver hooks were invoked.
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested_deprecated(param_tb,
						  DCB_TC_ATTR_PARAM_MAX,
						  pg_tb[i],
						  dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		/* Parameters not supplied stay "undefined" for the driver. */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
868
/* DCB_CMD_PGTX_SCFG: set TX priority-group configuration (dir = 0). */
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
874
/* DCB_CMD_PGRX_SCFG: set RX priority-group configuration (dir = 1). */
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
880
/* DCB_CMD_BCN_GCFG: report BCN configuration — u8 per-priority rate
 * limiter enables followed by u32 config parameters.  The request nest
 * selects what to report; DCB_BCN_ATTR_ALL selects everything.
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
					  tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
					  NULL);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* Per-priority rate limiter enables (u8). */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* BCN configuration parameters (u32). */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}
942
943static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
944 u32 seq, struct nlattr **tb, struct sk_buff *skb)
945{
946 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
947 int i;
948 int ret;
949 u8 value_byte;
950 u32 value_int;
951
952 if (!tb[DCB_ATTR_BCN])
953 return -EINVAL;
954
955 if (!netdev->dcbnl_ops->setbcncfg ||
956 !netdev->dcbnl_ops->setbcnrp)
957 return -EOPNOTSUPP;
958
959 ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
960 tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest,
961 NULL);
962 if (ret)
963 return ret;
964
965 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
966 if (data[i] == NULL)
967 continue;
968 value_byte = nla_get_u8(data[i]);
969 netdev->dcbnl_ops->setbcnrp(netdev,
970 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
971 }
972
973 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
974 if (data[i] == NULL)
975 continue;
976 value_int = nla_get_u32(data[i]);
977 netdev->dcbnl_ops->setbcncfg(netdev,
978 i, value_int);
979 }
980
981 return nla_put_u8(skb, DCB_ATTR_BCN, 0);
982}
983
/* Emit the peer's application table into @skb under @app_nested_type,
 * optionally preceded by a @app_info_type info blob, with one
 * @app_entry_type attribute per table entry.
 *
 * Note the error flow: a driver failure from peer_getappinfo() or
 * peer_getapptable() skips message building entirely and, by design,
 * still returns 0 (err is reset before the label); only an skb-size
 * failure after building starts returns -EMSGSIZE.
 */
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/* Retrieve the peer app configuration from the driver.  If either
	 * driver handler fails, build nothing. */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc_array(app_count, sizeof(struct dcb_app),
				      GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/* From here on the only possible failure is running out of
		 * skb space, hence the preset -EMSGSIZE. */
		err = -EMSGSIZE;

		app = nla_nest_start_noflag(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
1040
1041
/* Fill @skb with the complete IEEE 802.1Qaz state of @netdev: the
 * interface name, a DCB_ATTR_IEEE nest of every feature the driver
 * supports (ETS, max-rate, QCN, PFC, buffers, the shared app table and
 * peer state), and finally the DCBX mode.  Returns 0 or -EMSGSIZE.
 */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	/* Each section below is emitted only if the driver implements the
	 * hook AND the hook succeeds; hook failures are silently skipped. */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->dcbnl_getbuffer) {
		struct dcbnl_buffer buffer;

		memset(&buffer, 0, sizeof(buffer));
		err = ops->dcbnl_getbuffer(netdev, &buffer);
		if (!err &&
		    nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
			return -EMSGSIZE;
	}

	app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	/* Walk the shared app table under dcb_lock; entries for other
	 * interfaces are skipped. */
	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	/* The DCBX mode is read while still holding dcb_lock. */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* Peer (remote) IEEE state, if the driver exposes it. */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
1184
/* Emit one full CEE priority-group block (all 8 TC parameter nests plus
 * all 8 bandwidth-group percentages) into @skb.
 * @dir: non-zero = TX (DCB_ATTR_CEE_TX_PG), 0 = RX (DCB_ATTR_CEE_RX_PG).
 * NOTE(review): callers gate on the matching getpgtccfg*/getpgbwgcfg*
 * hooks being non-NULL; this function calls them unconditionally.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	/* i is reused: first as the outer attribute id, then as loop index */
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start_noflag(skb, i);

	if (!pg)
		return -EMSGSIZE;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}
1237
/*
 * dcbnl_cee_fill - dump the complete CEE DCB state of @netdev into @skb
 *
 * Layout: DCB_ATTR_IFNAME, then a DCB_ATTR_CEE nest holding the local
 * PG/PFC configuration, the shared APP table, optional feature flags and
 * peer state, and finally (outside the nest) the DCBX mode when the
 * driver reports one.
 *
 * Returns 0 on success or -EMSGSIZE; on error the caller frees the skb,
 * so open nests are deliberately not cancelled here.  The dcb_unlock
 * label exists for failures that occur while dcb_lock is held.
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local priority-group configuration (TX then RX) */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local PFC configuration, one u8 per user priority */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
								DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* shared application-priority table; dcb_lock guards dcb_app_list */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start_noflag(skb,
									DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	/* snapshot the dcbx mode while still under the lock; emitted below */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* feature flags: only those the driver can actually report */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start_noflag(skb,
							    DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer (link partner) state, if the driver exposes it */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX mode goes outside the DCB_ATTR_CEE nest */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	err = -EMSGSIZE;
	return err;
}
1380
1381static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1382 u32 seq, u32 portid, int dcbx_ver)
1383{
1384 struct net *net = dev_net(dev);
1385 struct sk_buff *skb;
1386 struct nlmsghdr *nlh;
1387 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1388 int err;
1389
1390 if (!ops)
1391 return -EOPNOTSUPP;
1392
1393 skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1394 if (!skb)
1395 return -ENOBUFS;
1396
1397 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1398 err = dcbnl_ieee_fill(skb, dev);
1399 else
1400 err = dcbnl_cee_fill(skb, dev);
1401
1402 if (err < 0) {
1403
1404 nlmsg_free(skb);
1405 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1406 } else {
1407
1408 nlmsg_end(skb, nlh);
1409 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1410 }
1411
1412 return err;
1413}
1414
/**
 * dcbnl_ieee_notify - send an IEEE-mode DCB netlink notification
 * @dev: device whose DCB state changed
 * @event: rtnetlink message type (e.g. RTM_SETDCB)
 * @cmd: DCB_CMD_* the notification refers to
 * @seq: netlink sequence number to echo
 * @portid: originating port id (0 for kernel-originated events)
 *
 * Thin wrapper around dcbnl_notify() selecting the IEEE fill routine.
 */
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);
1421
/**
 * dcbnl_cee_notify - send a CEE-mode DCB netlink notification
 * @dev: device whose DCB state changed
 * @event: rtnetlink message type (e.g. RTM_SETDCB)
 * @cmd: DCB_CMD_* the notification refers to
 * @seq: netlink sequence number to echo
 * @portid: originating port id (0 for kernel-originated events)
 *
 * Thin wrapper around dcbnl_notify() selecting the CEE fill routine.
 */
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
1428
1429
1430
1431
1432
1433
1434
/*
 * dcbnl_ieee_set - DCB_CMD_IEEE_SET handler
 *
 * Parses the DCB_ATTR_IEEE nest and pushes each present sub-object (ETS,
 * max-rate, QCN, PFC, buffer map, APP table) to the driver in turn,
 * stopping at the first failure.  The final status (0 or the first
 * error) is echoed back to the requester as a u8 DCB_ATTR_IEEE
 * attribute, and an IEEE notification is multicast regardless of outcome.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int prio;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
					  tb[DCB_ATTR_IEEE],
					  dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
		struct ieee_qcn *qcn =
			nla_data(ieee[DCB_ATTR_IEEE_QCN]);

		err = ops->ieee_setqcn(netdev, qcn);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
		struct dcbnl_buffer *buffer =
			nla_data(ieee[DCB_ATTR_DCB_BUFFER]);

		/* validate every buffer index before touching the driver */
		for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
			if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
				err = -EINVAL;
				goto err;
			}
		}

		err = ops->dcbnl_setbuffer(netdev, buffer);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;

			/* nested attrs are not size-checked by the policy */
			if (nla_len(attr) < sizeof(struct dcb_app)) {
				err = -ERANGE;
				goto err;
			}

			app_data = nla_data(attr);
			/* drivers without ieee_setapp fall back to the
			 * shared in-kernel app table
			 */
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* success also falls through here with err == 0 */
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
1532
1533static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1534 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1535{
1536 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1537
1538 if (!ops)
1539 return -EOPNOTSUPP;
1540
1541 return dcbnl_ieee_fill(skb, netdev);
1542}
1543
1544static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1545 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1546{
1547 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1548 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1549 int err;
1550
1551 if (!ops)
1552 return -EOPNOTSUPP;
1553
1554 if (!tb[DCB_ATTR_IEEE])
1555 return -EINVAL;
1556
1557 err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1558 tb[DCB_ATTR_IEEE],
1559 dcbnl_ieee_policy, NULL);
1560 if (err)
1561 return err;
1562
1563 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1564 struct nlattr *attr;
1565 int rem;
1566
1567 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1568 struct dcb_app *app_data;
1569
1570 if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1571 continue;
1572 app_data = nla_data(attr);
1573 if (ops->ieee_delapp)
1574 err = ops->ieee_delapp(netdev, app_data);
1575 else
1576 err = dcb_ieee_delapp(netdev, app_data);
1577 if (err)
1578 goto err;
1579 }
1580 }
1581
1582err:
1583 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1584 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1585 return err;
1586}
1587
1588
1589
1590static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1591 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1592{
1593 if (!netdev->dcbnl_ops->getdcbx)
1594 return -EOPNOTSUPP;
1595
1596 return nla_put_u8(skb, DCB_ATTR_DCBX,
1597 netdev->dcbnl_ops->getdcbx(netdev));
1598}
1599
1600static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1601 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1602{
1603 u8 value;
1604
1605 if (!netdev->dcbnl_ops->setdcbx)
1606 return -EOPNOTSUPP;
1607
1608 if (!tb[DCB_ATTR_DCBX])
1609 return -EINVAL;
1610
1611 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1612
1613 return nla_put_u8(skb, DCB_ATTR_DCBX,
1614 netdev->dcbnl_ops->setdcbx(netdev, value));
1615}
1616
/*
 * dcbnl_getfeatcfg - DCB_CMD_GFEATCFG handler
 *
 * Reports the requested feature flags (or every flag when
 * DCB_FEATCFG_ATTR_ALL is present) inside a DCB_ATTR_FEATCFG nest.
 * Returns 0 on success or a negative errno on parse/put failure.
 */
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
					  tb[DCB_ATTR_FEATCFG],
					  dcbnl_featcfg_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		/* skip flags the requester did not ask for */
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	/* the success path falls through here too, with ret == 0 */
	return ret;
}
1662
1663static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1664 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1665{
1666 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1667 int ret, i;
1668 u8 value;
1669
1670 if (!netdev->dcbnl_ops->setfeatcfg)
1671 return -ENOTSUPP;
1672
1673 if (!tb[DCB_ATTR_FEATCFG])
1674 return -EINVAL;
1675
1676 ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1677 tb[DCB_ATTR_FEATCFG],
1678 dcbnl_featcfg_nest, NULL);
1679
1680 if (ret)
1681 goto err;
1682
1683 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1684 if (data[i] == NULL)
1685 continue;
1686
1687 value = nla_get_u8(data[i]);
1688
1689 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1690
1691 if (ret)
1692 goto err;
1693 }
1694err:
1695 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1696
1697 return ret;
1698}
1699
1700
1701static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1702 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1703{
1704 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1705
1706 if (!ops)
1707 return -EOPNOTSUPP;
1708
1709 return dcbnl_cee_fill(skb, netdev);
1710}
1711
/* Dispatch entry for one DCB netlink command. */
struct reply_func {
	/* reply message type: RTM_GETDCB or RTM_SETDCB */
	int type;

	/* handler that fills the reply skb; returns 0 or a negative errno */
	int (*cb)(struct net_device *, struct nlmsghdr *, u32,
		  struct nlattr **, struct sk_buff *);
};
1720
/* Command dispatch table, indexed by DCB_CMD_*.  A NULL .cb means the
 * command is not implemented and dcb_doit() returns -EOPNOTSUPP.
 */
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
	[DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
};
1750
/*
 * dcb_doit - top-level rtnetlink handler for RTM_GETDCB / RTM_SETDCB
 *
 * Validates permissions and attributes, resolves the target device from
 * DCB_ATTR_IFNAME, dispatches to the per-command handler via
 * reply_funcs[], and unicasts the handler-built reply to the requester.
 */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	/* NOTE(review): this duplicates the fn->type capability check
	 * below; harmless, but one of the two is redundant.
	 */
	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
				     dcbnl_rtnl_policy, extack);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* look up the per-command handler; missing entries are zeroed */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;
	if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	/* rtnl_unicast consumes reply_skb on both success and failure */
	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}
1809
1810static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1811 int ifindex, int prio)
1812{
1813 struct dcb_app_type *itr;
1814
1815 list_for_each_entry(itr, &dcb_app_list, list) {
1816 if (itr->app.selector == app->selector &&
1817 itr->app.protocol == app->protocol &&
1818 itr->ifindex == ifindex &&
1819 ((prio == -1) || itr->app.priority == prio))
1820 return itr;
1821 }
1822
1823 return NULL;
1824}
1825
1826static int dcb_app_add(const struct dcb_app *app, int ifindex)
1827{
1828 struct dcb_app_type *entry;
1829
1830 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1831 if (!entry)
1832 return -ENOMEM;
1833
1834 memcpy(&entry->app, app, sizeof(*app));
1835 entry->ifindex = ifindex;
1836 list_add(&entry->list, &dcb_app_list);
1837
1838 return 0;
1839}
1840
1841
1842
1843
1844
1845
1846
1847
1848u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1849{
1850 struct dcb_app_type *itr;
1851 u8 prio = 0;
1852
1853 spin_lock_bh(&dcb_lock);
1854 itr = dcb_app_lookup(app, dev->ifindex, -1);
1855 if (itr)
1856 prio = itr->app.priority;
1857 spin_unlock_bh(&dcb_lock);
1858
1859 return prio;
1860}
1861EXPORT_SYMBOL(dcb_getapp);
1862
1863
1864
1865
1866
1867
1868
1869
1870int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1871{
1872 struct dcb_app_type *itr;
1873 struct dcb_app_type event;
1874 int err = 0;
1875
1876 event.ifindex = dev->ifindex;
1877 memcpy(&event.app, new, sizeof(event.app));
1878 if (dev->dcbnl_ops->getdcbx)
1879 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1880
1881 spin_lock_bh(&dcb_lock);
1882
1883 itr = dcb_app_lookup(new, dev->ifindex, -1);
1884 if (itr) {
1885 if (new->priority)
1886 itr->app.priority = new->priority;
1887 else {
1888 list_del(&itr->list);
1889 kfree(itr);
1890 }
1891 goto out;
1892 }
1893
1894 if (new->priority)
1895 err = dcb_app_add(new, dev->ifindex);
1896out:
1897 spin_unlock_bh(&dcb_lock);
1898 if (!err)
1899 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1900 return err;
1901}
1902EXPORT_SYMBOL(dcb_setapp);
1903
1904
1905
1906
1907
1908
1909
1910
1911u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1912{
1913 struct dcb_app_type *itr;
1914 u8 prio = 0;
1915
1916 spin_lock_bh(&dcb_lock);
1917 itr = dcb_app_lookup(app, dev->ifindex, -1);
1918 if (itr)
1919 prio |= 1 << itr->app.priority;
1920 spin_unlock_bh(&dcb_lock);
1921
1922 return prio;
1923}
1924EXPORT_SYMBOL(dcb_ieee_getapp_mask);
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1935{
1936 struct dcb_app_type event;
1937 int err = 0;
1938
1939 event.ifindex = dev->ifindex;
1940 memcpy(&event.app, new, sizeof(event.app));
1941 if (dev->dcbnl_ops->getdcbx)
1942 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1943
1944 spin_lock_bh(&dcb_lock);
1945
1946 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1947 err = -EEXIST;
1948 goto out;
1949 }
1950
1951 err = dcb_app_add(new, dev->ifindex);
1952out:
1953 spin_unlock_bh(&dcb_lock);
1954 if (!err)
1955 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1956 return err;
1957}
1958EXPORT_SYMBOL(dcb_ieee_setapp);
1959
1960
1961
1962
1963
1964
1965int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1966{
1967 struct dcb_app_type *itr;
1968 struct dcb_app_type event;
1969 int err = -ENOENT;
1970
1971 event.ifindex = dev->ifindex;
1972 memcpy(&event.app, del, sizeof(event.app));
1973 if (dev->dcbnl_ops->getdcbx)
1974 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1975
1976 spin_lock_bh(&dcb_lock);
1977
1978 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1979 list_del(&itr->list);
1980 kfree(itr);
1981 err = 0;
1982 }
1983
1984 spin_unlock_bh(&dcb_lock);
1985 if (!err)
1986 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1987 return err;
1988}
1989EXPORT_SYMBOL(dcb_ieee_delapp);
1990
1991
1992
1993
1994
1995
1996
1997void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
1998 struct dcb_ieee_app_prio_map *p_map)
1999{
2000 int ifindex = dev->ifindex;
2001 struct dcb_app_type *itr;
2002 u8 prio;
2003
2004 memset(p_map->map, 0, sizeof(p_map->map));
2005
2006 spin_lock_bh(&dcb_lock);
2007 list_for_each_entry(itr, &dcb_app_list, list) {
2008 if (itr->ifindex == ifindex &&
2009 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2010 itr->app.protocol < 64 &&
2011 itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
2012 prio = itr->app.priority;
2013 p_map->map[prio] |= 1ULL << itr->app.protocol;
2014 }
2015 }
2016 spin_unlock_bh(&dcb_lock);
2017}
2018EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
2019
2020
2021
2022
2023
2024
2025
2026void
2027dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
2028 struct dcb_ieee_app_dscp_map *p_map)
2029{
2030 int ifindex = dev->ifindex;
2031 struct dcb_app_type *itr;
2032
2033 memset(p_map->map, 0, sizeof(p_map->map));
2034
2035 spin_lock_bh(&dcb_lock);
2036 list_for_each_entry(itr, &dcb_app_list, list) {
2037 if (itr->ifindex == ifindex &&
2038 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2039 itr->app.protocol < 64 &&
2040 itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2041 p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
2042 }
2043 spin_unlock_bh(&dcb_lock);
2044}
2045EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
2058{
2059 int ifindex = dev->ifindex;
2060 struct dcb_app_type *itr;
2061 u8 mask = 0;
2062
2063 spin_lock_bh(&dcb_lock);
2064 list_for_each_entry(itr, &dcb_app_list, list) {
2065 if (itr->ifindex == ifindex &&
2066 itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
2067 itr->app.protocol == 0 &&
2068 itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2069 mask |= 1 << itr->app.priority;
2070 }
2071 spin_unlock_bh(&dcb_lock);
2072
2073 return mask;
2074}
2075EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
2076
/* Module init: set up the shared app list and register the
 * RTM_GETDCB / RTM_SETDCB rtnetlink handlers.
 */
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);

	return 0;
}
device_initcall(dcbnl_init);
2087