// SPDX-License-Identifier: GPL-2.0
/* net/sched/sch_cbs.c	Credit Based Shaper
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

/* Credit Based Shaper (CBS)
 * =========================
 *
 * This is a simple rate-limiting shaper aimed at TSN applications on
 * systems with known traffic workloads.
 *
 * Its algorithm is defined by the IEEE 802.1Q-2014 Specification,
 * Section 8.6.8.2, and explained in more detail in Annex L of the
 * same specification.
 *
 * There are four tunables to be considered:
 *
 *	'idleslope': rate of credits that is accumulated (in kilobits per
 *	second) while there is at least one packet waiting for
 *	transmission. Packets are transmitted when the current value of
 *	credits is equal to or greater than zero. When there is no packet
 *	to be transmitted the amount of credits is set to zero. This is
 *	the main tunable of the CBS algorithm.
 *
 *	'sendslope': rate of credits that is depleted (it should be a
 *	negative number of kilobits per second) while a transmission is
 *	occurring (IEEE 802.1Q-2014 Section 8.6.8.2 item g):
 *
 *	sendslope = idleslope - port_transmit_rate
 *
 *	'hicredit': maximum amount of credits (in bytes) that can be
 *	accumulated. It depends on the characteristics of interfering
 *	traffic: 'max_interference_size' is the maximum size of any burst
 *	of traffic that can delay the transmission of a frame that is
 *	available for transmission for this traffic class (IEEE
 *	802.1Q-2014 Annex L, Equation L-3):
 *
 *	hicredit = max_interference_size * (idleslope / port_transmit_rate)
 *
 *	'locredit': minimum amount of credits that can be reached. It is
 *	a function of the traffic flowing through this qdisc (IEEE
 *	802.1Q-2014 Annex L, Equation L-2):
 *
 *	locredit = max_frame_size * (sendslope / port_transmit_rate)
 */

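/* Example of configuring cbs in software mode with the tc tool; the parent
 * handle and the numeric values below are illustrative only and need to be
 * derived from the formulas above for a real link:
 *
 *   tc qdisc replace dev eth0 parent 8001:1 cbs idleslope 20000 \
 *      sendslope -980000 hicredit 30 locredit -1470 offload 0
 *
 * With 'offload 1' the qdisc instead asks the driver, via ndo_setup_tc(),
 * to program the shaper into the hardware queue.
 */
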
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

static LIST_HEAD(cbs_list);
static DEFINE_SPINLOCK(cbs_list_lock);

#define BYTES_PER_KBIT (1000LL / 8)

struct cbs_sched_data {
	bool offload;
	int queue;
	atomic64_t port_rate; /* in bytes/s */
	s64 last; /* timestamp in ns */
	s64 credits; /* in bytes */
	s32 locredit; /* in bytes */
	s32 hicredit; /* in bytes */
	s64 sendslope; /* in bytes/s */
	s64 idleslope; /* in bytes/s */
	struct qdisc_watchdog watchdog;
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free);
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct Qdisc *qdisc;
	struct list_head cbs_list;
};

static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct Qdisc *child,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	int err;

	err = child->ops->enqueue(skb, child, to_free);
	if (err != NET_XMIT_SUCCESS)
		return err;

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

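/* In full offload mode the NIC enforces the credit-based shaping, so the
 * enqueue/dequeue handlers only feed the child qdisc and keep this qdisc's
 * queue statistics consistent.
 */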
static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;

	return cbs_child_enqueue(skb, sch, qdisc, to_free);
}

static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;

	if (sch->q.qlen == 0 && q->credits > 0) {
		/* We need to stop accumulating credits when there are
		 * no enqueued packets and q->credits is positive.
		 */
		q->credits = 0;
		q->last = ktime_get_ns();
	}

	return cbs_child_enqueue(skb, sch, qdisc, to_free);
}

static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	return q->enqueue(skb, sch, to_free);
}

/* timediff is in ns, slope is in bytes/s */
static s64 timediff_to_credits(s64 timediff, s64 slope)
{
	return div64_s64(timediff * slope, NSEC_PER_SEC);
}

/* how long (in ns) until 'credits' (a negative value) reaches zero while
 * gaining credits at 'slope' bytes/s
 */
static s64 delay_from_credits(s64 credits, s64 slope)
{
	if (unlikely(slope == 0))
		return S64_MAX;

	return div64_s64(-credits * NSEC_PER_SEC, slope);
}

/* credits gained (or, for a negative slope, consumed) while transmitting
 * 'len' bytes on a port running at 'port_rate' bytes/s
 */
static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate)
{
	if (unlikely(port_rate == 0))
		return S64_MAX;

	return div64_s64(len * slope, port_rate);
}

static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child)
{
	struct sk_buff *skb;

	skb = child->ops->dequeue(child);
	if (!skb)
		return NULL;

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;

	return skb;
}

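/* Software (non-offloaded) dequeue, following IEEE 802.1Q-2014 Section
 * 8.6.8.2: while credits are negative they are replenished at 'idleslope'
 * and transmission is deferred (the watchdog wakes the qdisc up when the
 * credits are expected to reach zero); once a packet is sent, credits are
 * charged at 'sendslope' for its length and 'last' is advanced to the
 * estimated end of its transmission on the wire.
 */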
static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;
	s64 now = ktime_get_ns();
	struct sk_buff *skb;
	s64 credits;
	int len;

	/* The previous packet is still being sent */
	if (now < q->last) {
		qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
		return NULL;
	}
	if (q->credits < 0) {
		credits = timediff_to_credits(now - q->last, q->idleslope);

		credits = q->credits + credits;
		q->credits = min_t(s64, credits, q->hicredit);

		if (q->credits < 0) {
			s64 delay;

			delay = delay_from_credits(q->credits, q->idleslope);
			qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);

			q->last = now;

			return NULL;
		}
	}
	skb = cbs_child_dequeue(sch, qdisc);
	if (!skb)
		return NULL;

	len = qdisc_pkt_len(skb);

	/* As sendslope is a negative number, this will decrease the
	 * amount of q->credits.
	 */
	credits = credits_from_len(len, q->sendslope,
				   atomic64_read(&q->port_rate));
	credits += q->credits;

	q->credits = max_t(s64, credits, q->locredit);
	/* Estimate of the transmission of the last byte of the packet in ns */
	if (unlikely(atomic64_read(&q->port_rate) == 0))
		q->last = now;
	else
		q->last = now + div64_s64(len * NSEC_PER_SEC,
					  atomic64_read(&q->port_rate));

	return skb;
}

static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc = q->qdisc;

	return cbs_child_dequeue(sch, qdisc);
}

static struct sk_buff *cbs_dequeue(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

static const struct nla_policy cbs_policy[TCA_CBS_MAX + 1] = {
	[TCA_CBS_PARMS]	= { .len = sizeof(struct tc_cbs_qopt) },
};

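/* Switch back to the software handlers and, if the driver implements
 * ndo_setup_tc(), ask it to tear down the hardware shaper for this queue.
 */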
static void cbs_disable_offload(struct net_device *dev,
				struct cbs_sched_data *q)
{
	struct tc_cbs_qopt_offload cbs = { };
	const struct net_device_ops *ops;
	int err;

	if (!q->offload)
		return;

	q->enqueue = cbs_enqueue_soft;
	q->dequeue = cbs_dequeue_soft;

	ops = dev->netdev_ops;
	if (!ops->ndo_setup_tc)
		return;

	cbs.queue = q->queue;
	cbs.enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
	if (err < 0)
		pr_warn("Couldn't disable CBS offload for queue %d\n",
			cbs.queue);
}

static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
			      const struct tc_cbs_qopt *opt,
			      struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_cbs_qopt_offload cbs = { };
	int err;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
		return -EOPNOTSUPP;
	}

	cbs.queue = q->queue;

	cbs.enable = 1;
	cbs.hicredit = opt->hicredit;
	cbs.locredit = opt->locredit;
	cbs.idleslope = opt->idleslope;
	cbs.sendslope = opt->sendslope;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload");
		return err;
	}

	q->enqueue = cbs_enqueue_offload;
	q->dequeue = cbs_dequeue_offload;

	return 0;
}

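/* Derive the port transmit rate (in bytes per second) from the current link
 * speed; if the speed cannot be read or is unknown, fall back to 10 Mbit/s
 * so the software shaper still works with a conservative rate.
 */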
static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	s64 port_rate;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	port_rate = speed * 1000 * BYTES_PER_KBIT;

	atomic64_set(&q->port_rate, port_rate);
	netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->port_rate),
		   ecmd.base.speed);
}

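/* The software shaper depends on the link speed, which may change at
 * runtime (autonegotiation, cable replug). Refresh the port rate of every
 * cbs instance attached to a device that just came up or changed state.
 */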
static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct cbs_sched_data *q;
	struct net_device *qdev;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&cbs_list_lock);
	list_for_each_entry(q, &cbs_list, cbs_list) {
		qdev = qdisc_dev(q->qdisc);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&cbs_list_lock);

	if (found)
		cbs_set_port_rate(dev, q);

	return NOTIFY_DONE;
}

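/* Apply a new configuration. idleslope/sendslope arrive from user space in
 * kilobits per second and are stored internally in bytes per second;
 * hicredit/locredit are byte counts and are used as-is.
 */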
static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct nlattr *tb[TCA_CBS_MAX + 1];
	struct tc_cbs_qopt *qopt;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CBS_MAX, opt, cbs_policy,
					  extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CBS_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing CBS parameters which are mandatory");
		return -EINVAL;
	}

	qopt = nla_data(tb[TCA_CBS_PARMS]);

	if (!qopt->offload) {
		cbs_set_port_rate(dev, q);
		cbs_disable_offload(dev, q);
	} else {
		err = cbs_enable_offload(dev, q, qopt, extack);
		if (err < 0)
			return err;
	}

	/* Everything went OK, save the parameters used. */
	q->hicredit = qopt->hicredit;
	q->locredit = qopt->locredit;
	q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
	q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
	q->offload = qopt->offload;

	return 0;
}

static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	if (!opt) {
		NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
		return -EINVAL;
	}

	q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     sch->handle, extack);
	if (!q->qdisc)
		return -ENOMEM;

	spin_lock(&cbs_list_lock);
	list_add(&q->cbs_list, &cbs_list);
	spin_unlock(&cbs_list_lock);

	qdisc_hash_add(q->qdisc, false);

	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);

	q->enqueue = cbs_enqueue_soft;
	q->dequeue = cbs_dequeue_soft;

	qdisc_watchdog_init(&q->watchdog, sch);

	return cbs_change(sch, opt, extack);
}

static void cbs_destroy(struct Qdisc *sch)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	/* Nothing to do if we couldn't create the underlying qdisc */
	if (!q->qdisc)
		return;

	qdisc_watchdog_cancel(&q->watchdog);
	cbs_disable_offload(dev, q);

	spin_lock(&cbs_list_lock);
	list_del(&q->cbs_list);
	spin_unlock(&cbs_list_lock);

	qdisc_put(q->qdisc);
}

static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbs_sched_data *q = qdisc_priv(sch);
	struct tc_cbs_qopt opt = { };
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	opt.hicredit = q->hicredit;
	opt.locredit = q->locredit;
	opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
	opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
	opt.offload = q->offload;

	if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int cbs_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

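/* cbs is a classful qdisc with exactly one class (minor 1) holding the
 * child qdisc; the class operations below only deal with that single slot.
 */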
static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle, NULL);
		if (!new)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbs_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long cbs_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		}
		walker->count++;
	}
}

static const struct Qdisc_class_ops cbs_class_ops = {
	.graft		= cbs_graft,
	.leaf		= cbs_leaf,
	.find		= cbs_find,
	.walk		= cbs_walk,
	.dump		= cbs_dump_class,
};

static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
	.id		= "cbs",
	.cl_ops		= &cbs_class_ops,
	.priv_size	= sizeof(struct cbs_sched_data),
	.enqueue	= cbs_enqueue,
	.dequeue	= cbs_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= cbs_init,
	.reset		= qdisc_reset_queue,
	.destroy	= cbs_destroy,
	.change		= cbs_change,
	.dump		= cbs_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block cbs_device_notifier = {
	.notifier_call = cbs_dev_notifier,
};

static int __init cbs_module_init(void)
{
	int err;

	err = register_netdevice_notifier(&cbs_device_notifier);
	if (err)
		return err;

	err = register_qdisc(&cbs_qdisc_ops);
	if (err)
		unregister_netdevice_notifier(&cbs_device_notifier);

	return err;
}

static void __exit cbs_module_exit(void)
{
	unregister_qdisc(&cbs_qdisc_ops);
	unregister_netdevice_notifier(&cbs_device_notifier);
}
module_init(cbs_module_init)
module_exit(cbs_module_exit)
MODULE_LICENSE("GPL");