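/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */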
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

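/* use same locking rules as GIF* ioctl's */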
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

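/* generate a show function for a simple net_device field */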
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

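/* use same locking and permission rules as SIF* ioctl's */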
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

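/* use same locking rules as GIFHWADDR ioctl's */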
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;

		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;

		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent",
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL;

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

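/* read-write attributes */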
static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	dev->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

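	/* ignore trailing newline */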
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct switchdev_attr attr = {
			.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
			.flags = SWITCHDEV_F_NO_RECURSE,
		};

		ret = switchdev_port_attr_get(netdev, &attr);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
				      attr.u.ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

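/* show a given attribute in the statistics group */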
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset >= sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

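/* generate a read-only statistics attribute */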
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
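		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */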
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
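		/* On 64-bit arches the mask must fit in table->mask (u32);
		 * on 32-bit arches RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
		 * must not overflow.
		 */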
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
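			/* Enforce a limit to prevent overflow */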
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	       show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

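	/* The queue is being released: no concurrent RPS updaters remain,
	 * so the trivially-protected dereference (condition 1) is safe.
	 */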
	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto exit;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(&dev->_rx[i].kobj,
					   dev->sysfs_rx_queue_group);
		kobject_put(&dev->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
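/*
 * netdev_queue sysfs structures and functions.
 */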
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
			struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
			 struct netdev_queue_attribute *attr,
			 const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, fmt_ulong, trans_timeout);
}

#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_tx_maxrate(struct netdev_queue *queue,
			       struct netdev_queue_attribute *attribute,
			       char *buf)
{
	return sprintf(buf, "%lu\n", queue->tx_maxrate);
}

static ssize_t set_tx_maxrate(struct netdev_queue *queue,
			      struct netdev_queue_attribute *attribute,
			      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	int err, index = get_netdev_queue_index(queue);
	u32 rate = 0;

	err = kstrtou32(buf, 10, &rate);
	if (err < 0)
		return err;

	if (!rtnl_trylock())
		return restart_syscall();

	err = -EOPNOTSUPP;
	if (dev->netdev_ops->ndo_set_tx_maxrate)
		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);

	rtnl_unlock();
	if (!err) {
		queue->tx_maxrate = rate;
		return len;
	}
	return err;
}

static struct netdev_queue_attribute queue_tx_maxrate =
	__ATTR(tx_maxrate, S_IRUGO | S_IWUSR,
	       show_tx_maxrate, set_tx_maxrate);
#endif

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
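/*
 * Byte queue limits sysfs structures and functions.
 */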
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
		value = DQL_MAX_LIMIT;
	} else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	       bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR,					\
	       bql_show_ ## NAME, bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;

				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
	&queue_tx_maxrate.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
				     "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
					       NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

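	/* pass interface to uevent. */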
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

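	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * until the interface is deleted.
	 */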
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

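/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */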
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev = container_of(d, struct net_device, dev);

	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
	int ret = 0;

	if (dev->parent)
		ret = dev->parent->of_node == data;

	return ret == 0 ? dev->of_node == data : ret;
}

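/*
 * of_find_net_device_by_node - lookup the net device for the device node
 * @np: OF device node
 *
 * Looks up the net_device structure corresponding with the device node.
 * If successful, returns a pointer to the net_device with the embedded
 * struct device refcount incremented by one, or NULL on failure. The
 * refcount must be dropped when done with the net_device.
 */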
struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

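/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */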
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

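/* Create sysfs entries for network device. */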
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
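	/* Allow for a device specific group */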
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}