/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
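
/* use same locking rules as GIF* ioctl's */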
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}
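
/* generate a show attribute using the format string */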
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
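
/* use same locking and permission rules as SIF* ioctl's */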
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
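
/* use same locking rules as GIF* ioctl's */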
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *net, unsigned long new_carrier)
{
	if (!netif_running(net))
		return -EINVAL;
	return dev_change_carrier(net, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
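
/* The following attributes take the RTNL lock with rtnl_trylock() and
 * restart the syscall if it is contended, to avoid deadlocking against
 * a concurrent unregister that holds RTNL while removing sysfs files.
 */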

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
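
/* read-write attributes */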
static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;
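
	/* ignore trailing newline */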
	if (len >  0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_port_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_phys_port_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);
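
/* show a given attribute in the statistics group */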
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}
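
/* generate a read-only statistics attribute */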
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
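/*
 * RX queue sysfs structures and functions.
 */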
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
			struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,			\
	struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
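
/* Install a new RPS CPU map for this RX queue: the new map is published
 * with rcu_assign_pointer() under rps_map_lock, and the old map is freed
 * only after an RCU grace period.
 */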
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
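		/* Round mask up to the next power of two minus one,
		 * i.e. mask = roundup_pow_of_two(count) - 1, computed
		 * without risking overflow.
		 */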
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
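		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */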
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
		    / sizeof(struct rps_dev_flow)) {
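			/* Enforce a limit to prevent overflow */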
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};
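
/* kobject release hook: tear down the queue's RPS state and drop the
 * reference on the owning net_device taken at kobject creation.
 */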
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */

int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
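/*
 * netdev_queue sysfs structures and functions.
 */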
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
			struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
	struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, fmt_ulong, trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
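/*
 * Byte queue limits sysfs structures and functions.
 */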
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
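/* Translate a netdev_queue pointer back to its index in dev->_tx[]. */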
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
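
/* show the XPS CPU mask currently mapped to this transmit queue */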
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */
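
/* Grow or shrink the set of "tx-<n>" queue kobjects to match new_num,
 * rolling back to old_num if adding a new queue fails.
 */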
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(net->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;
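
	/* pass interface to uevent. */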
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;
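
	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * when net_device is being removed by rtnetlink.
	 */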
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
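
/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */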
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
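
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */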
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}
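
/* Create sysfs entries for network device. */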
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
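	/* Allow for a device specific group */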
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}