/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GII page lock */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
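
/*
 * For reference, a sketch of what one instantiation expands to.
 * NETDEVICE_SHOW_RO(mtu, fmt_dec) roughly becomes:
 *
 *	static ssize_t format_mtu(const struct net_device *dev, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", dev->mtu);
 *	}
 *	static ssize_t mtu_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 *	static DEVICE_ATTR_RO(mtu);
 *
 * DEVICE_ATTR_RO() in turn creates a 0444 "struct device_attribute
 * dev_attr_mtu" wired to mtu_show().
 */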

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
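
/*
 * Note on the locking idiom above: rtnl_trylock() is used instead of
 * rtnl_lock() because a sysfs write may race with a concurrent device
 * teardown that holds the RTNL while waiting for sysfs activity to
 * drain; blocking here could deadlock.  restart_syscall() makes the
 * calling write() restart transparently once the lock is free, so the
 * caller normally never sees an error from the contention.
 */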

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
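
/*
 * Illustrative use of the carrier attribute from user space (the
 * interface name is an assumption; a driver must supply
 * ndo_change_carrier for writes to succeed, and the interface must
 * be running):
 *
 *   # cat /sys/class/net/eth0/carrier
 *   1
 *   # echo 0 > /sys/class/net/eth0/carrier     (force carrier off)
 */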

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;

		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;

		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
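
/*
 * Example interaction with the mtu attribute (illustrative; the
 * interface name is an assumption):
 *
 *   # cat /sys/class/net/eth0/mtu
 *   1500
 *   # echo 9000 > /sys/class/net/eth0/mtu
 *
 * The write is parsed by netdev_store() and applied through
 * change_mtu() -> dev_set_mtu(), so the same validation as the
 * SIOCSIFMTU ioctl applies.
 */
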
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	dev->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);
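
/*
 * gro_flush_timeout is consumed by the NAPI completion path as a
 * delay in nanoseconds: a non-zero value arms an hrtimer that briefly
 * defers the GRO flush in the hope of further coalescing.
 * Illustrative setting (interface name is an assumption):
 *
 *   # echo 20000 > /sys/class/net/eth0/gro_flush_timeout   (20 us)
 */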

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int) new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = netdev_switch_parent_id_get(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_switch_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
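
/*
 * Sketch of one expansion: NETSTAT_ENTRY(rx_packets) yields an
 * rx_packets_show() that hands netstat_show() the byte offset of the
 * rx_packets counter inside struct rtnl_link_stats64, which is then
 * read as a u64 straight out of the snapshot from dev_get_stats().
 * Each entry below surfaces as a read-only file under
 * /sys/class/net/<iface>/statistics/.
 */
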
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}
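
/*
 * Illustrative RPS configuration (queue and interface names are
 * assumptions): steer receive processing for rx-0 to CPUs 0-3 by
 * writing a hex cpumask, then read it back:
 *
 *   # echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *   # cat /sys/class/net/eth0/queues/rx-0/rps_cpus
 *   f
 *
 * (The printed width depends on the number of possible CPUs.)
 * Writing 0 removes the map and, once the old map is freed, drops
 * the rps_needed static key back toward its off state.
 */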

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
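
/*
 * The count written here is rounded up to a power of two because the
 * flow table is indexed by "hash & table->mask".  Illustrative RFS
 * setup (paths are assumptions; the global companion knob is
 * /proc/sys/net/core/rps_sock_flow_entries):
 *
 *   # echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *   # echo 4096  > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 */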

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto exit;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */
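
/* Grow or shrink the set of rx-<n> kobjects to match a new queue
 * count.  Called at register time (old_num == 0) and whenever
 * real_num_rx_queues changes; if adding a queue fails, the queues
 * added so far by this call are rolled back.
 */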
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(&dev->_rx[i].kobj,
					   dev->sysfs_rx_queue_group);
		kobject_put(&dev->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
			struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 struct netdev_queue_attribute *attr,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	/* sysfs values are conventionally newline-terminated */
	return sprintf(buf, "%lu\n", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	       bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
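
/*
 * Sketch of tuning BQL from user space (the queue path is an
 * assumption).  Values are bytes; limit_min/limit_max clamp the
 * auto-tuned limit, and bql_set() above accepts "max" as shorthand
 * for DQL_MAX_LIMIT:
 *
 *   # cd /sys/class/net/eth0/queues/tx-0/byte_queue_limits
 *   # cat limit                (current auto-tuned limit)
 *   # echo 30000 > limit_max   (cap queued-but-unsent bytes)
 *   # echo max   > limit_max   (remove the cap again)
 */
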
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;

				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
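
/*
 * Illustrative XPS setup (interface and queue names are assumptions):
 * let only CPUs 0-1 select tx-0 by writing a hex cpumask:
 *
 *   # echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * This is the transmit-side mirror of rps_cpus: rps_cpus picks which
 * CPUs process packets received on an rx queue, while xps_cpus picks
 * which tx queue packets sent from the given CPUs prefer.
 */
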
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * when net_device is renamed or moved to another netns.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;

	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}
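
/*
 * For orientation, a sketch of the sysfs tree the registration above
 * produces (illustrative; "eth0" and the queue counts are
 * assumptions):
 *
 *   /sys/class/net/eth0/
 *   |-- address, mtu, flags, operstate, ...    (net_class_attrs)
 *   |-- statistics/rx_packets, tx_bytes, ...   (netstat_group)
 *   `-- queues/
 *       |-- rx-0/rps_cpus, rps_flow_cnt
 *       `-- tx-0/tx_timeout, xps_cpus, byte_queue_limits/{limit,...}
 */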

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}