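/*
 * Network device class and per-device sysfs attributes
 * (exposed under /sys/class/net/<iface>/).
 */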
12#include <linux/capability.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/if_arp.h>
16#include <linux/slab.h>
17#include <linux/nsproxy.h>
18#include <net/sock.h>
19#include <net/net_namespace.h>
20#include <linux/rtnetlink.h>
21#include <linux/vmalloc.h>
22#include <linux/export.h>
23#include <linux/jiffies.h>
24#include <linux/pm_runtime.h>
25
26#include "net-sysfs.h"
27
28#ifdef CONFIG_SYSFS
29static const char fmt_hex[] = "%#x\n";
30static const char fmt_long_hex[] = "%#lx\n";
31static const char fmt_dec[] = "%d\n";
32static const char fmt_udec[] = "%u\n";
33static const char fmt_ulong[] = "%lu\n";
34static const char fmt_u64[] = "%llu\n";
35
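/* A net_device is considered "alive" until unregistration has started,
 * i.e. while reg_state is still <= NETREG_REGISTERED.
 */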
36static inline int dev_isalive(const struct net_device *dev)
37{
38 return dev->reg_state <= NETREG_REGISTERED;
39}
40
41
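/* Generic show helper: takes dev_base_lock and only reports a value
 * while the device is still alive.
 */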
42static ssize_t netdev_show(const struct device *dev,
43 struct device_attribute *attr, char *buf,
44 ssize_t (*format)(const struct net_device *, char *))
45{
46 struct net_device *net = to_net_dev(dev);
47 ssize_t ret = -EINVAL;
48
49 read_lock(&dev_base_lock);
50 if (dev_isalive(net))
51 ret = (*format)(net, buf);
52 read_unlock(&dev_base_lock);
53
54 return ret;
55}
56
57
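/* Generate a show function and sysfs attribute for a simple net_device field. */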
58#define NETDEVICE_SHOW(field, format_string) \
59static ssize_t format_##field(const struct net_device *net, char *buf) \
60{ \
61 return sprintf(buf, format_string, net->field); \
62} \
63static ssize_t field##_show(struct device *dev, \
64 struct device_attribute *attr, char *buf) \
65{ \
66 return netdev_show(dev, attr, buf, format_##field); \
67} \
68
69#define NETDEVICE_SHOW_RO(field, format_string) \
70NETDEVICE_SHOW(field, format_string); \
71static DEVICE_ATTR_RO(field)
72
73#define NETDEVICE_SHOW_RW(field, format_string) \
74NETDEVICE_SHOW(field, format_string); \
75static DEVICE_ATTR_RW(field)
76
77
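/* Generic store helper: requires CAP_NET_ADMIN in the user namespace that
 * owns the device's network namespace, parses an unsigned long and applies
 * it under RTNL while the device is still alive.
 */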
78static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
79 const char *buf, size_t len,
80 int (*set)(struct net_device *, unsigned long))
81{
82 struct net_device *netdev = to_net_dev(dev);
83 struct net *net = dev_net(netdev);
84 unsigned long new;
85 int ret = -EINVAL;
86
87 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
88 return -EPERM;
89
90 ret = kstrtoul(buf, 0, &new);
91 if (ret)
92 goto err;
93
94 if (!rtnl_trylock())
95 return restart_syscall();
96
        if (dev_isalive(netdev)) {
                ret = (*set)(netdev, new);
                if (ret == 0)
                        ret = len;
        }
101 rtnl_unlock();
102 err:
103 return ret;
104}
105
106NETDEVICE_SHOW_RO(dev_id, fmt_hex);
107NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
108NETDEVICE_SHOW_RO(addr_len, fmt_dec);
109NETDEVICE_SHOW_RO(iflink, fmt_dec);
110NETDEVICE_SHOW_RO(ifindex, fmt_dec);
111NETDEVICE_SHOW_RO(type, fmt_dec);
112NETDEVICE_SHOW_RO(link_mode, fmt_dec);
113
114
115static ssize_t address_show(struct device *dev, struct device_attribute *attr,
116 char *buf)
117{
118 struct net_device *net = to_net_dev(dev);
119 ssize_t ret = -EINVAL;
120
121 read_lock(&dev_base_lock);
122 if (dev_isalive(net))
123 ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
124 read_unlock(&dev_base_lock);
125 return ret;
126}
127static DEVICE_ATTR_RO(address);
128
129static ssize_t broadcast_show(struct device *dev,
130 struct device_attribute *attr, char *buf)
131{
132 struct net_device *net = to_net_dev(dev);
133 if (dev_isalive(net))
134 return sysfs_format_mac(buf, net->broadcast, net->addr_len);
135 return -EINVAL;
136}
137static DEVICE_ATTR_RO(broadcast);
138
139static int change_carrier(struct net_device *net, unsigned long new_carrier)
140{
141 if (!netif_running(net))
142 return -EINVAL;
143 return dev_change_carrier(net, (bool) new_carrier);
144}
145
146static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
147 const char *buf, size_t len)
148{
149 return netdev_store(dev, attr, buf, len, change_carrier);
150}
151
152static ssize_t carrier_show(struct device *dev,
153 struct device_attribute *attr, char *buf)
154{
155 struct net_device *netdev = to_net_dev(dev);
156 if (netif_running(netdev)) {
157 return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
158 }
159 return -EINVAL;
160}
161static DEVICE_ATTR_RW(carrier);
162
163static ssize_t speed_show(struct device *dev,
164 struct device_attribute *attr, char *buf)
165{
166 struct net_device *netdev = to_net_dev(dev);
167 int ret = -EINVAL;
168
169 if (!rtnl_trylock())
170 return restart_syscall();
171
172 if (netif_running(netdev)) {
173 struct ethtool_cmd cmd;
174 if (!__ethtool_get_settings(netdev, &cmd))
175 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
176 }
177 rtnl_unlock();
178 return ret;
179}
180static DEVICE_ATTR_RO(speed);
181
182static ssize_t duplex_show(struct device *dev,
183 struct device_attribute *attr, char *buf)
184{
185 struct net_device *netdev = to_net_dev(dev);
186 int ret = -EINVAL;
187
188 if (!rtnl_trylock())
189 return restart_syscall();
190
191 if (netif_running(netdev)) {
192 struct ethtool_cmd cmd;
193 if (!__ethtool_get_settings(netdev, &cmd)) {
194 const char *duplex;
195 switch (cmd.duplex) {
196 case DUPLEX_HALF:
197 duplex = "half";
198 break;
199 case DUPLEX_FULL:
200 duplex = "full";
201 break;
202 default:
203 duplex = "unknown";
204 break;
205 }
206 ret = sprintf(buf, "%s\n", duplex);
207 }
208 }
209 rtnl_unlock();
210 return ret;
211}
212static DEVICE_ATTR_RO(duplex);
213
214static ssize_t dormant_show(struct device *dev,
215 struct device_attribute *attr, char *buf)
216{
217 struct net_device *netdev = to_net_dev(dev);
218
219 if (netif_running(netdev))
220 return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
221
222 return -EINVAL;
223}
224static DEVICE_ATTR_RO(dormant);
225
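/* Indexed by the IF_OPER_* values (RFC 2863 operational states). */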
226static const char *const operstates[] = {
227 "unknown",
228 "notpresent",
229 "down",
230 "lowerlayerdown",
231 "testing",
232 "dormant",
233 "up"
234};
235
236static ssize_t operstate_show(struct device *dev,
237 struct device_attribute *attr, char *buf)
238{
239 const struct net_device *netdev = to_net_dev(dev);
240 unsigned char operstate;
241
242 read_lock(&dev_base_lock);
243 operstate = netdev->operstate;
244 if (!netif_running(netdev))
245 operstate = IF_OPER_DOWN;
246 read_unlock(&dev_base_lock);
247
248 if (operstate >= ARRAY_SIZE(operstates))
249 return -EINVAL;
250
251 return sprintf(buf, "%s\n", operstates[operstate]);
252}
253static DEVICE_ATTR_RO(operstate);
254
/* read-write attributes */
257static int change_mtu(struct net_device *net, unsigned long new_mtu)
258{
259 return dev_set_mtu(net, (int) new_mtu);
260}
261
262static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
263 const char *buf, size_t len)
264{
265 return netdev_store(dev, attr, buf, len, change_mtu);
266}
267NETDEVICE_SHOW_RW(mtu, fmt_dec);
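/* Example usage from userspace (assuming an interface named eth0):
 *   cat /sys/class/net/eth0/mtu
 *   echo 1400 > /sys/class/net/eth0/mtu    (requires CAP_NET_ADMIN)
 */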
268
269static int change_flags(struct net_device *net, unsigned long new_flags)
270{
271 return dev_change_flags(net, (unsigned int) new_flags);
272}
273
274static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
275 const char *buf, size_t len)
276{
277 return netdev_store(dev, attr, buf, len, change_flags);
278}
279NETDEVICE_SHOW_RW(flags, fmt_hex);
280
281static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
282{
283 net->tx_queue_len = new_len;
284 return 0;
285}
286
287static ssize_t tx_queue_len_store(struct device *dev,
288 struct device_attribute *attr,
289 const char *buf, size_t len)
290{
291 if (!capable(CAP_NET_ADMIN))
292 return -EPERM;
293
294 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
295}
296NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
297
298static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
299 const char *buf, size_t len)
300{
301 struct net_device *netdev = to_net_dev(dev);
302 struct net *net = dev_net(netdev);
303 size_t count = len;
304 ssize_t ret;
305
306 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
307 return -EPERM;
308
        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;
312
313 if (!rtnl_trylock())
314 return restart_syscall();
315 ret = dev_set_alias(netdev, buf, count);
316 rtnl_unlock();
317
318 return ret < 0 ? ret : len;
319}
320
321static ssize_t ifalias_show(struct device *dev,
322 struct device_attribute *attr, char *buf)
323{
324 const struct net_device *netdev = to_net_dev(dev);
325 ssize_t ret = 0;
326
327 if (!rtnl_trylock())
328 return restart_syscall();
329 if (netdev->ifalias)
330 ret = sprintf(buf, "%s\n", netdev->ifalias);
331 rtnl_unlock();
332 return ret;
333}
334static DEVICE_ATTR_RW(ifalias);
335
336static int change_group(struct net_device *net, unsigned long new_group)
337{
338 dev_set_group(net, (int) new_group);
339 return 0;
340}
341
342static ssize_t group_store(struct device *dev, struct device_attribute *attr,
343 const char *buf, size_t len)
344{
345 return netdev_store(dev, attr, buf, len, change_group);
346}
347NETDEVICE_SHOW(group, fmt_dec);
348static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
349
350static ssize_t phys_port_id_show(struct device *dev,
351 struct device_attribute *attr, char *buf)
352{
353 struct net_device *netdev = to_net_dev(dev);
354 ssize_t ret = -EINVAL;
355
356 if (!rtnl_trylock())
357 return restart_syscall();
358
359 if (dev_isalive(netdev)) {
360 struct netdev_phys_port_id ppid;
361
362 ret = dev_get_phys_port_id(netdev, &ppid);
363 if (!ret)
364 ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
365 }
366 rtnl_unlock();
367
368 return ret;
369}
370static DEVICE_ATTR_RO(phys_port_id);
371
372static struct attribute *net_class_attrs[] = {
373 &dev_attr_netdev_group.attr,
374 &dev_attr_type.attr,
375 &dev_attr_dev_id.attr,
376 &dev_attr_iflink.attr,
377 &dev_attr_ifindex.attr,
378 &dev_attr_addr_assign_type.attr,
379 &dev_attr_addr_len.attr,
380 &dev_attr_link_mode.attr,
381 &dev_attr_address.attr,
382 &dev_attr_broadcast.attr,
383 &dev_attr_speed.attr,
384 &dev_attr_duplex.attr,
385 &dev_attr_dormant.attr,
386 &dev_attr_operstate.attr,
387 &dev_attr_ifalias.attr,
388 &dev_attr_carrier.attr,
389 &dev_attr_mtu.attr,
390 &dev_attr_flags.attr,
391 &dev_attr_tx_queue_len.attr,
392 &dev_attr_phys_port_id.attr,
393 NULL,
394};
395ATTRIBUTE_GROUPS(net_class);
396
397
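/* Show one u64 counter from the device's rtnl_link_stats64 at the given
 * byte offset.
 */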
398static ssize_t netstat_show(const struct device *d,
399 struct device_attribute *attr, char *buf,
400 unsigned long offset)
401{
402 struct net_device *dev = to_net_dev(d);
403 ssize_t ret = -EINVAL;
404
405 WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
406 offset % sizeof(u64) != 0);
407
408 read_lock(&dev_base_lock);
409 if (dev_isalive(dev)) {
410 struct rtnl_link_stats64 temp;
411 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
412
413 ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
414 }
415 read_unlock(&dev_base_lock);
416 return ret;
417}
418
419
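/* Generate a read-only attribute for one rtnl_link_stats64 field. */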
420#define NETSTAT_ENTRY(name) \
421static ssize_t name##_show(struct device *d, \
422 struct device_attribute *attr, char *buf) \
423{ \
424 return netstat_show(d, attr, buf, \
425 offsetof(struct rtnl_link_stats64, name)); \
426} \
427static DEVICE_ATTR_RO(name)
428
429NETSTAT_ENTRY(rx_packets);
430NETSTAT_ENTRY(tx_packets);
431NETSTAT_ENTRY(rx_bytes);
432NETSTAT_ENTRY(tx_bytes);
433NETSTAT_ENTRY(rx_errors);
434NETSTAT_ENTRY(tx_errors);
435NETSTAT_ENTRY(rx_dropped);
436NETSTAT_ENTRY(tx_dropped);
437NETSTAT_ENTRY(multicast);
438NETSTAT_ENTRY(collisions);
439NETSTAT_ENTRY(rx_length_errors);
440NETSTAT_ENTRY(rx_over_errors);
441NETSTAT_ENTRY(rx_crc_errors);
442NETSTAT_ENTRY(rx_frame_errors);
443NETSTAT_ENTRY(rx_fifo_errors);
444NETSTAT_ENTRY(rx_missed_errors);
445NETSTAT_ENTRY(tx_aborted_errors);
446NETSTAT_ENTRY(tx_carrier_errors);
447NETSTAT_ENTRY(tx_fifo_errors);
448NETSTAT_ENTRY(tx_heartbeat_errors);
449NETSTAT_ENTRY(tx_window_errors);
450NETSTAT_ENTRY(rx_compressed);
451NETSTAT_ENTRY(tx_compressed);
452
453static struct attribute *netstat_attrs[] = {
454 &dev_attr_rx_packets.attr,
455 &dev_attr_tx_packets.attr,
456 &dev_attr_rx_bytes.attr,
457 &dev_attr_tx_bytes.attr,
458 &dev_attr_rx_errors.attr,
459 &dev_attr_tx_errors.attr,
460 &dev_attr_rx_dropped.attr,
461 &dev_attr_tx_dropped.attr,
462 &dev_attr_multicast.attr,
463 &dev_attr_collisions.attr,
464 &dev_attr_rx_length_errors.attr,
465 &dev_attr_rx_over_errors.attr,
466 &dev_attr_rx_crc_errors.attr,
467 &dev_attr_rx_frame_errors.attr,
468 &dev_attr_rx_fifo_errors.attr,
469 &dev_attr_rx_missed_errors.attr,
470 &dev_attr_tx_aborted_errors.attr,
471 &dev_attr_tx_carrier_errors.attr,
472 &dev_attr_tx_fifo_errors.attr,
473 &dev_attr_tx_heartbeat_errors.attr,
474 &dev_attr_tx_window_errors.attr,
475 &dev_attr_rx_compressed.attr,
476 &dev_attr_tx_compressed.attr,
477 NULL
478};
479
480
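/* These counters appear as, e.g., /sys/class/net/<iface>/statistics/rx_bytes. */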
481static struct attribute_group netstat_group = {
482 .name = "statistics",
483 .attrs = netstat_attrs,
484};
485
486#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
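/* Placeholder group that creates an empty "wireless" directory for devices
 * with wireless extensions or cfg80211 state (see netdev_register_kobject()).
 */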
487static struct attribute *wireless_attrs[] = {
488 NULL
489};
490
491static struct attribute_group wireless_group = {
492 .name = "wireless",
493 .attrs = wireless_attrs,
494};
495#endif
496
497#else
498#define net_class_groups NULL
499#endif
500
501#ifdef CONFIG_SYSFS
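/* RX queue sysfs structures and functions. */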
502#define to_rx_queue_attr(_attr) container_of(_attr, \
503 struct rx_queue_attribute, attr)
504
505#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
506
507static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
508 char *buf)
509{
510 struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
511 struct netdev_rx_queue *queue = to_rx_queue(kobj);
512
513 if (!attribute->show)
514 return -EIO;
515
516 return attribute->show(queue, attribute, buf);
517}
518
519static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
520 const char *buf, size_t count)
521{
522 struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
523 struct netdev_rx_queue *queue = to_rx_queue(kobj);
524
525 if (!attribute->store)
526 return -EIO;
527
528 return attribute->store(queue, attribute, buf, count);
529}
530
531static const struct sysfs_ops rx_queue_sysfs_ops = {
532 .show = rx_queue_attr_show,
533 .store = rx_queue_attr_store,
534};
535
536#ifdef CONFIG_RPS
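/* Show the CPUs in this RX queue's RPS map as a hex CPU mask. */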
537static ssize_t show_rps_map(struct netdev_rx_queue *queue,
538 struct rx_queue_attribute *attribute, char *buf)
539{
540 struct rps_map *map;
541 cpumask_var_t mask;
542 size_t len = 0;
543 int i;
544
545 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
546 return -ENOMEM;
547
548 rcu_read_lock();
549 map = rcu_dereference(queue->rps_map);
550 if (map)
551 for (i = 0; i < map->len; i++)
552 cpumask_set_cpu(map->cpus[i], mask);
553
554 len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
555 if (PAGE_SIZE - len < 3) {
556 rcu_read_unlock();
557 free_cpumask_var(mask);
558 return -EINVAL;
559 }
560 rcu_read_unlock();
561
562 free_cpumask_var(mask);
563 len += sprintf(buf + len, "\n");
564 return len;
565}
566
567static ssize_t store_rps_map(struct netdev_rx_queue *queue,
568 struct rx_queue_attribute *attribute,
569 const char *buf, size_t len)
570{
571 struct rps_map *old_map, *map;
572 cpumask_var_t mask;
573 int err, cpu, i;
574 static DEFINE_SPINLOCK(rps_map_lock);
575
576 if (!capable(CAP_NET_ADMIN))
577 return -EPERM;
578
579 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
580 return -ENOMEM;
581
582 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
583 if (err) {
584 free_cpumask_var(mask);
585 return err;
586 }
587
588 map = kzalloc(max_t(unsigned int,
589 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
590 GFP_KERNEL);
591 if (!map) {
592 free_cpumask_var(mask);
593 return -ENOMEM;
594 }
595
596 i = 0;
597 for_each_cpu_and(cpu, mask, cpu_online_mask)
598 map->cpus[i++] = cpu;
599
600 if (i)
601 map->len = i;
602 else {
603 kfree(map);
604 map = NULL;
605 }
606
607 spin_lock(&rps_map_lock);
608 old_map = rcu_dereference_protected(queue->rps_map,
609 lockdep_is_held(&rps_map_lock));
610 rcu_assign_pointer(queue->rps_map, map);
611 spin_unlock(&rps_map_lock);
612
613 if (map)
614 static_key_slow_inc(&rps_needed);
615 if (old_map) {
616 kfree_rcu(old_map, rcu);
617 static_key_slow_dec(&rps_needed);
618 }
619 free_cpumask_var(mask);
620 return len;
621}
622
623static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
624 struct rx_queue_attribute *attr,
625 char *buf)
626{
627 struct rps_dev_flow_table *flow_table;
628 unsigned long val = 0;
629
630 rcu_read_lock();
631 flow_table = rcu_dereference(queue->rps_flow_table);
632 if (flow_table)
633 val = (unsigned long)flow_table->mask + 1;
634 rcu_read_unlock();
635
636 return sprintf(buf, "%lu\n", val);
637}
638
639static void rps_dev_flow_table_release(struct rcu_head *rcu)
640{
641 struct rps_dev_flow_table *table = container_of(rcu,
642 struct rps_dev_flow_table, rcu);
643 vfree(table);
644}
645
646static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
647 struct rx_queue_attribute *attr,
648 const char *buf, size_t len)
649{
650 unsigned long mask, count;
651 struct rps_dev_flow_table *table, *old_table;
652 static DEFINE_SPINLOCK(rps_dev_flow_lock);
653 int rc;
654
655 if (!capable(CAP_NET_ADMIN))
656 return -EPERM;
657
658 rc = kstrtoul(buf, 0, &count);
659 if (rc < 0)
660 return rc;
661
        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * computed this way to avoid overflows.
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64-bit arches, mask must fit in table->mask (u32);
                 * on 32-bit arches, RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
                 * must not overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                    / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else {
                table = NULL;
        }
692
693 spin_lock(&rps_dev_flow_lock);
694 old_table = rcu_dereference_protected(queue->rps_flow_table,
695 lockdep_is_held(&rps_dev_flow_lock));
696 rcu_assign_pointer(queue->rps_flow_table, table);
697 spin_unlock(&rps_dev_flow_lock);
698
699 if (old_table)
700 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
701
702 return len;
703}
704
705static struct rx_queue_attribute rps_cpus_attribute =
706 __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
707
708
709static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
710 __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
711 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
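/* Example usage from userspace (assuming an interface named eth0):
 *   echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *   echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 */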
712#endif
713
714static struct attribute *rx_queue_default_attrs[] = {
715#ifdef CONFIG_RPS
716 &rps_cpus_attribute.attr,
717 &rps_dev_flow_table_cnt_attribute.attr,
718#endif
719 NULL
720};
721
722static void rx_queue_release(struct kobject *kobj)
723{
724 struct netdev_rx_queue *queue = to_rx_queue(kobj);
725#ifdef CONFIG_RPS
726 struct rps_map *map;
727 struct rps_dev_flow_table *flow_table;
728
729
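        /* The kobject is being released, so no readers can be in flight;
         * a plain dereference of the RPS state (condition 1) is safe here.
         */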
730 map = rcu_dereference_protected(queue->rps_map, 1);
731 if (map) {
732 RCU_INIT_POINTER(queue->rps_map, NULL);
733 kfree_rcu(map, rcu);
734 }
735
736 flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
737 if (flow_table) {
738 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
739 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
740 }
741#endif
742
743 memset(kobj, 0, sizeof(*kobj));
744 dev_put(queue->dev);
745}
746
747static const void *rx_queue_namespace(struct kobject *kobj)
748{
749 struct netdev_rx_queue *queue = to_rx_queue(kobj);
750 struct device *dev = &queue->dev->dev;
751 const void *ns = NULL;
752
753 if (dev->class && dev->class->ns_type)
754 ns = dev->class->namespace(dev);
755
756 return ns;
757}
758
759static struct kobj_type rx_queue_ktype = {
760 .sysfs_ops = &rx_queue_sysfs_ops,
761 .release = rx_queue_release,
762 .default_attrs = rx_queue_default_attrs,
763 .namespace = rx_queue_namespace
764};
765
766static int rx_queue_add_kobject(struct net_device *net, int index)
767{
768 struct netdev_rx_queue *queue = net->_rx + index;
769 struct kobject *kobj = &queue->kobj;
770 int error = 0;
771
772 kobj->kset = net->queues_kset;
773 error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
774 "rx-%u", index);
775 if (error)
776 goto exit;
777
778 if (net->sysfs_rx_queue_group) {
779 error = sysfs_create_group(kobj, net->sysfs_rx_queue_group);
780 if (error)
781 goto exit;
782 }
783
784 kobject_uevent(kobj, KOBJ_ADD);
785 dev_hold(queue->dev);
786
787 return error;
788exit:
789 kobject_put(kobj);
790 return error;
791}
792#endif
793
794int
795net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
796{
797#ifdef CONFIG_SYSFS
798 int i;
799 int error = 0;
800
801#ifndef CONFIG_RPS
802 if (!net->sysfs_rx_queue_group)
803 return 0;
804#endif
805 for (i = old_num; i < new_num; i++) {
806 error = rx_queue_add_kobject(net, i);
807 if (error) {
808 new_num = old_num;
809 break;
810 }
811 }
812
813 while (--i >= new_num) {
814 if (net->sysfs_rx_queue_group)
815 sysfs_remove_group(&net->_rx[i].kobj,
816 net->sysfs_rx_queue_group);
817 kobject_put(&net->_rx[i].kobj);
818 }
819
820 return error;
821#else
822 return 0;
823#endif
824}
825
826#ifdef CONFIG_SYSFS

/*
 * netdev_queue (TX queue) sysfs structures and functions.
 */
830struct netdev_queue_attribute {
831 struct attribute attr;
832 ssize_t (*show)(struct netdev_queue *queue,
833 struct netdev_queue_attribute *attr, char *buf);
834 ssize_t (*store)(struct netdev_queue *queue,
835 struct netdev_queue_attribute *attr, const char *buf, size_t len);
836};
837#define to_netdev_queue_attr(_attr) container_of(_attr, \
838 struct netdev_queue_attribute, attr)
839
840#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
841
842static ssize_t netdev_queue_attr_show(struct kobject *kobj,
843 struct attribute *attr, char *buf)
844{
845 struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
846 struct netdev_queue *queue = to_netdev_queue(kobj);
847
848 if (!attribute->show)
849 return -EIO;
850
851 return attribute->show(queue, attribute, buf);
852}
853
854static ssize_t netdev_queue_attr_store(struct kobject *kobj,
855 struct attribute *attr,
856 const char *buf, size_t count)
857{
858 struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
859 struct netdev_queue *queue = to_netdev_queue(kobj);
860
861 if (!attribute->store)
862 return -EIO;
863
864 return attribute->store(queue, attribute, buf, count);
865}
866
867static const struct sysfs_ops netdev_queue_sysfs_ops = {
868 .show = netdev_queue_attr_show,
869 .store = netdev_queue_attr_store,
870};
871
/* Number of times the netdev watchdog has declared a TX timeout on this queue. */
static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        /* sysfs values are conventionally newline-terminated */
        return sprintf(buf, "%lu\n", trans_timeout);
}
884
885static struct netdev_queue_attribute queue_trans_timeout =
886 __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
887
888#ifdef CONFIG_BQL
/*
 * Byte queue limits (BQL) sysfs structures and functions.
 */
892static ssize_t bql_show(char *buf, unsigned int value)
893{
894 return sprintf(buf, "%u\n", value);
895}
896
897static ssize_t bql_set(const char *buf, const size_t count,
898 unsigned int *pvalue)
899{
900 unsigned int value;
901 int err;
902
903 if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
904 value = DQL_MAX_LIMIT;
905 else {
906 err = kstrtouint(buf, 10, &value);
907 if (err < 0)
908 return err;
909 if (value > DQL_MAX_LIMIT)
910 return -EINVAL;
911 }
912
913 *pvalue = value;
914
915 return count;
916}
917
918static ssize_t bql_show_hold_time(struct netdev_queue *queue,
919 struct netdev_queue_attribute *attr,
920 char *buf)
921{
922 struct dql *dql = &queue->dql;
923
924 return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
925}
926
927static ssize_t bql_set_hold_time(struct netdev_queue *queue,
928 struct netdev_queue_attribute *attribute,
929 const char *buf, size_t len)
930{
931 struct dql *dql = &queue->dql;
932 unsigned int value;
933 int err;
934
935 err = kstrtouint(buf, 10, &value);
936 if (err < 0)
937 return err;
938
939 dql->slack_hold_time = msecs_to_jiffies(value);
940
941 return len;
942}
943
944static struct netdev_queue_attribute bql_hold_time_attribute =
945 __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
946 bql_set_hold_time);
947
948static ssize_t bql_show_inflight(struct netdev_queue *queue,
949 struct netdev_queue_attribute *attr,
950 char *buf)
951{
952 struct dql *dql = &queue->dql;
953
954 return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
955}
956
957static struct netdev_queue_attribute bql_inflight_attribute =
958 __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
959
960#define BQL_ATTR(NAME, FIELD) \
961static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
962 struct netdev_queue_attribute *attr, \
963 char *buf) \
964{ \
965 return bql_show(buf, queue->dql.FIELD); \
966} \
967 \
968static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
969 struct netdev_queue_attribute *attr, \
970 const char *buf, size_t len) \
971{ \
972 return bql_set(buf, len, &queue->dql.FIELD); \
973} \
974 \
975static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \
976 __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \
977 bql_set_ ## NAME);
978
979BQL_ATTR(limit, limit)
980BQL_ATTR(limit_max, max_limit)
981BQL_ATTR(limit_min, min_limit)
982
983static struct attribute *dql_attrs[] = {
984 &bql_limit_attribute.attr,
985 &bql_limit_max_attribute.attr,
986 &bql_limit_min_attribute.attr,
987 &bql_hold_time_attribute.attr,
988 &bql_inflight_attribute.attr,
989 NULL
990};
991
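/* BQL knobs live under /sys/class/net/<iface>/queues/tx-<n>/byte_queue_limits/:
 * limit, limit_max, limit_min, hold_time and inflight.
 */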
992static struct attribute_group dql_group = {
993 .name = "byte_queue_limits",
994 .attrs = dql_attrs,
995};
996#endif
997
998#ifdef CONFIG_XPS
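/* XPS: return the index of @queue within its device's _tx[] array. */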
999static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
1000{
1001 struct net_device *dev = queue->dev;
1002 int i;
1003
1004 for (i = 0; i < dev->num_tx_queues; i++)
1005 if (queue == &dev->_tx[i])
1006 break;
1007
1008 BUG_ON(i >= dev->num_tx_queues);
1009
1010 return i;
1011}
1012
1013
1014static ssize_t show_xps_map(struct netdev_queue *queue,
1015 struct netdev_queue_attribute *attribute, char *buf)
1016{
1017 struct net_device *dev = queue->dev;
1018 struct xps_dev_maps *dev_maps;
1019 cpumask_var_t mask;
1020 unsigned long index;
1021 size_t len = 0;
1022 int i;
1023
1024 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1025 return -ENOMEM;
1026
1027 index = get_netdev_queue_index(queue);
1028
1029 rcu_read_lock();
1030 dev_maps = rcu_dereference(dev->xps_maps);
1031 if (dev_maps) {
1032 for_each_possible_cpu(i) {
1033 struct xps_map *map =
1034 rcu_dereference(dev_maps->cpu_map[i]);
1035 if (map) {
1036 int j;
1037 for (j = 0; j < map->len; j++) {
1038 if (map->queues[j] == index) {
1039 cpumask_set_cpu(i, mask);
1040 break;
1041 }
1042 }
1043 }
1044 }
1045 }
1046 rcu_read_unlock();
1047
1048 len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
1049 if (PAGE_SIZE - len < 3) {
1050 free_cpumask_var(mask);
1051 return -EINVAL;
1052 }
1053
1054 free_cpumask_var(mask);
1055 len += sprintf(buf + len, "\n");
1056 return len;
1057}
1058
1059static ssize_t store_xps_map(struct netdev_queue *queue,
1060 struct netdev_queue_attribute *attribute,
1061 const char *buf, size_t len)
1062{
1063 struct net_device *dev = queue->dev;
1064 unsigned long index;
1065 cpumask_var_t mask;
1066 int err;
1067
1068 if (!capable(CAP_NET_ADMIN))
1069 return -EPERM;
1070
1071 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1072 return -ENOMEM;
1073
1074 index = get_netdev_queue_index(queue);
1075
1076 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1077 if (err) {
1078 free_cpumask_var(mask);
1079 return err;
1080 }
1081
1082 err = netif_set_xps_queue(dev, mask, index);
1083
1084 free_cpumask_var(mask);
1085
1086 return err ? : len;
1087}
1088
1089static struct netdev_queue_attribute xps_cpus_attribute =
1090 __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
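/* Example usage from userspace (assuming an interface named eth0):
 *   echo 1 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 */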
1091#endif
1092
1093static struct attribute *netdev_queue_default_attrs[] = {
1094 &queue_trans_timeout.attr,
1095#ifdef CONFIG_XPS
1096 &xps_cpus_attribute.attr,
1097#endif
1098 NULL
1099};
1100
1101static void netdev_queue_release(struct kobject *kobj)
1102{
1103 struct netdev_queue *queue = to_netdev_queue(kobj);
1104
1105 memset(kobj, 0, sizeof(*kobj));
1106 dev_put(queue->dev);
1107}
1108
1109static const void *netdev_queue_namespace(struct kobject *kobj)
1110{
1111 struct netdev_queue *queue = to_netdev_queue(kobj);
1112 struct device *dev = &queue->dev->dev;
1113 const void *ns = NULL;
1114
1115 if (dev->class && dev->class->ns_type)
1116 ns = dev->class->namespace(dev);
1117
1118 return ns;
1119}
1120
1121static struct kobj_type netdev_queue_ktype = {
1122 .sysfs_ops = &netdev_queue_sysfs_ops,
1123 .release = netdev_queue_release,
1124 .default_attrs = netdev_queue_default_attrs,
1125 .namespace = netdev_queue_namespace,
1126};
1127
1128static int netdev_queue_add_kobject(struct net_device *net, int index)
1129{
1130 struct netdev_queue *queue = net->_tx + index;
1131 struct kobject *kobj = &queue->kobj;
1132 int error = 0;
1133
1134 kobj->kset = net->queues_kset;
1135 error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
1136 "tx-%u", index);
1137 if (error)
1138 goto exit;
1139
1140#ifdef CONFIG_BQL
1141 error = sysfs_create_group(kobj, &dql_group);
1142 if (error)
1143 goto exit;
1144#endif
1145
1146 kobject_uevent(kobj, KOBJ_ADD);
1147 dev_hold(queue->dev);
1148
1149 return 0;
1150exit:
1151 kobject_put(kobj);
1152 return error;
1153}
1154#endif
1155
1156int
1157netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
1158{
1159#ifdef CONFIG_SYSFS
1160 int i;
1161 int error = 0;
1162
1163 for (i = old_num; i < new_num; i++) {
1164 error = netdev_queue_add_kobject(net, i);
1165 if (error) {
1166 new_num = old_num;
1167 break;
1168 }
1169 }
1170
1171 while (--i >= new_num) {
1172 struct netdev_queue *queue = net->_tx + i;
1173
1174#ifdef CONFIG_BQL
1175 sysfs_remove_group(&queue->kobj, &dql_group);
1176#endif
1177 kobject_put(&queue->kobj);
1178 }
1179
1180 return error;
1181#else
1182 return 0;
1183#endif
1184}
1185
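/* Create the per-device "queues" kset and the rx-<n>/tx-<n> kobjects. */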
1186static int register_queue_kobjects(struct net_device *net)
1187{
1188 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
1189
1190#ifdef CONFIG_SYSFS
1191 net->queues_kset = kset_create_and_add("queues",
1192 NULL, &net->dev.kobj);
1193 if (!net->queues_kset)
1194 return -ENOMEM;
1195 real_rx = net->real_num_rx_queues;
1196#endif
1197 real_tx = net->real_num_tx_queues;
1198
1199 error = net_rx_queue_update_kobjects(net, 0, real_rx);
1200 if (error)
1201 goto error;
1202 rxq = real_rx;
1203
1204 error = netdev_queue_update_kobjects(net, 0, real_tx);
1205 if (error)
1206 goto error;
1207 txq = real_tx;
1208
1209 return 0;
1210
1211error:
1212 netdev_queue_update_kobjects(net, txq, 0);
1213 net_rx_queue_update_kobjects(net, rxq, 0);
1214 return error;
1215}
1216
1217static void remove_queue_kobjects(struct net_device *net)
1218{
1219 int real_rx = 0, real_tx = 0;
1220
1221#ifdef CONFIG_SYSFS
1222 real_rx = net->real_num_rx_queues;
1223#endif
1224 real_tx = net->real_num_tx_queues;
1225
1226 net_rx_queue_update_kobjects(net, real_rx, 0);
1227 netdev_queue_update_kobjects(net, real_tx, 0);
1228#ifdef CONFIG_SYSFS
1229 kset_unregister(net->queues_kset);
1230#endif
1231}
1232
1233static bool net_current_may_mount(void)
1234{
1235 struct net *net = current->nsproxy->net_ns;
1236
1237 return ns_capable(net->user_ns, CAP_SYS_ADMIN);
1238}
1239
1240static void *net_grab_current_ns(void)
1241{
1242 struct net *ns = current->nsproxy->net_ns;
1243#ifdef CONFIG_NET_NS
1244 if (ns)
1245 atomic_inc(&ns->passive);
1246#endif
1247 return ns;
1248}
1249
1250static const void *net_initial_ns(void)
1251{
1252 return &init_net;
1253}
1254
1255static const void *net_netlink_ns(struct sock *sk)
1256{
1257 return sock_net(sk);
1258}
1259
1260struct kobj_ns_type_operations net_ns_type_operations = {
1261 .type = KOBJ_NS_TYPE_NET,
1262 .current_may_mount = net_current_may_mount,
1263 .grab_current_ns = net_grab_current_ns,
1264 .netlink_ns = net_netlink_ns,
1265 .initial_ns = net_initial_ns,
1266 .drop_ns = net_drop_ns,
1267};
1268EXPORT_SYMBOL_GPL(net_ns_type_operations);
1269
1270static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1271{
1272 struct net_device *dev = to_net_dev(d);
1273 int retval;
1274
        /* pass the interface name to the uevent */
1276 retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
1277 if (retval)
1278 goto exit;
1279
        /* pass the ifindex to the uevent;
         * ifindex is useful as it won't change (the interface name may).
         */
1283 retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
1284
1285exit:
1286 return retval;
1287}
1288
/*
 * netdev_release -- destroy and free a dead device.
 * Called when the last reference to the device kobject is gone.
 */
1293static void netdev_release(struct device *d)
1294{
1295 struct net_device *dev = to_net_dev(d);
1296
1297 BUG_ON(dev->reg_state != NETREG_RELEASED);
1298
1299 kfree(dev->ifalias);
1300 netdev_freemem(dev);
1301}
1302
1303static const void *net_namespace(struct device *d)
1304{
1305 struct net_device *dev;
1306 dev = container_of(d, struct net_device, dev);
1307 return dev_net(dev);
1308}
1309
1310static struct class net_class = {
1311 .name = "net",
1312 .dev_release = netdev_release,
1313 .dev_groups = net_class_groups,
1314 .dev_uevent = netdev_uevent,
1315 .ns_type = &net_ns_type_operations,
1316 .namespace = net_namespace,
1317};
1318
/* Delete sysfs entries but hold the kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
1323{
1324 struct device *dev = &(net->dev);
1325
1326 kobject_get(&dev->kobj);
1327
1328 remove_queue_kobjects(net);
1329
1330 pm_runtime_set_memalloc_noio(dev, false);
1331
1332 device_del(dev);
1333}
1334
/* Create sysfs entries for a network device. */
1336int netdev_register_kobject(struct net_device *net)
1337{
1338 struct device *dev = &(net->dev);
1339 const struct attribute_group **groups = net->sysfs_groups;
1340 int error = 0;
1341
1342 device_initialize(dev);
1343 dev->class = &net_class;
1344 dev->platform_data = net;
1345 dev->groups = groups;
1346
1347 dev_set_name(dev, "%s", net->name);
1348
1349#ifdef CONFIG_SYSFS
        /* Allow for a device-specific group to occupy the first slot */
1351 if (*groups)
1352 groups++;
1353
1354 *groups++ = &netstat_group;
1355
1356#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
1357 if (net->ieee80211_ptr)
1358 *groups++ = &wireless_group;
1359#if IS_ENABLED(CONFIG_WIRELESS_EXT)
1360 else if (net->wireless_handlers)
1361 *groups++ = &wireless_group;
1362#endif
1363#endif
1364#endif
1365
1366 error = device_add(dev);
1367 if (error)
1368 return error;
1369
1370 error = register_queue_kobjects(net);
1371 if (error) {
1372 device_del(dev);
1373 return error;
1374 }
1375
1376 pm_runtime_set_memalloc_noio(dev, true);
1377
1378 return error;
1379}
1380
1381int netdev_class_create_file_ns(struct class_attribute *class_attr,
1382 const void *ns)
1383{
1384 return class_create_file_ns(&net_class, class_attr, ns);
1385}
1386EXPORT_SYMBOL(netdev_class_create_file_ns);
1387
1388void netdev_class_remove_file_ns(struct class_attribute *class_attr,
1389 const void *ns)
1390{
1391 class_remove_file_ns(&net_class, class_attr, ns);
1392}
1393EXPORT_SYMBOL(netdev_class_remove_file_ns);
1394
1395int __init netdev_kobject_init(void)
1396{
1397 kobj_ns_type_register(&net_ns_type_operations);
1398 return class_register(&net_class);
1399}
1400